author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit     a453ac31f3428614cceb99027f8efbdb9258a40b
tree       f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/docker
parent     Initial commit.
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/docker')
319 files changed, 44623 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/README.md b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/README.md
new file mode 100644
index 00000000..385e70ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/README.md
@@ -0,0 +1,3 @@
+## Azure Pipelines Configuration
+
+Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml
new file mode 100644
index 00000000..9c16c788
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml
@@ -0,0 +1,227 @@
+trigger:
+  batch: true
+  branches:
+    include:
+      - main
+      - stable-*
+
+pr:
+  autoCancel: true
+  branches:
+    include:
+      - main
+      - stable-*
+
+schedules:
+  - cron: 0 9 * * *
+    displayName: Nightly
+    always: true
+    branches:
+      include:
+        - main
+        - stable-*
+
+variables:
+  - name: checkoutPath
+    value: ansible_collections/community/docker
+  - name: coverageBranches
+    value: main
+  - name: pipelinesCoverage
+    value: coverage
+  - name: entryPoint
+    value: tests/utils/shippable/shippable.sh
+  - name: fetchDepth
+    value: 0
+
+resources:
+  containers:
+    - container: default
+      image: quay.io/ansible/azure-pipelines-test-container:1.7.1
+
+pool: Standard
+
+stages:
+### Sanity & units
+  - stage: Ansible_devel
+    displayName: Sanity & Units devel
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          targets:
+            - name: Sanity
+              test: 'devel/sanity/1'
+            - name: Sanity Extra # Only on devel
+              test: 'devel/sanity/extra'
+            - name: Units
+              test: 'devel/units/1'
+  - stage: Ansible_2_10
+    displayName: Sanity & Units 2.10
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          targets:
+            - name: Sanity
+              test: '2.10/sanity/1'
+            - name: Units
+              test: '2.10/units/1'
+  - stage: Ansible_2_9
+    displayName: Sanity & Units 2.9
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          targets:
+            - name: Sanity
+              test: '2.9/sanity/1'
+            - name: Units
+              test: '2.9/units/1'
+### Docker
+  - stage: Docker_devel
+    displayName: Docker devel
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: devel/linux/{0}
+          targets:
+            - name: CentOS 7
+              test: centos7
+            - name: CentOS 8
+              test: centos8
+            - name: Fedora 32
+              test: fedora32
+            - name: Fedora 33
+              test: fedora33
+            - name: openSUSE 15 py2
+              test: opensuse15py2
+            - name: openSUSE 15 py3
+              test: opensuse15
+            - name: Ubuntu 16.04
+              test: ubuntu1604
+            - name: Ubuntu 18.04
+              test: ubuntu1804
+            - name: Ubuntu 20.04
+              test: ubuntu2004
+          groups:
+            - 4
+            - 5
+  - stage: Docker_2_10
+    displayName: Docker 2.10
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.10/linux/{0}
+          targets:
+            - name: CentOS 7
+              test: centos7
+            - name: CentOS 8
+              test: centos8
+            - name: Fedora 31
+              test: fedora31
+            - name: Fedora 32
+              test: fedora32
+            - name: openSUSE 15 py2
+              test: opensuse15py2
+            - name: openSUSE 15 py3
+              test: opensuse15
+            - name: Ubuntu 16.04
+              test: ubuntu1604
+            - name: Ubuntu 18.04
+              test: ubuntu1804
+          groups:
+            - 4
+            - 5
+  - stage: Docker_2_9
+    displayName: Docker 2.9
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.9/linux/{0}
+          targets:
+            - name: CentOS 7
+              test: centos7
+            - name: CentOS 8
+              test: centos8
+            - name: Fedora 31
+              test: fedora31
+
+            - name: openSUSE 15 py2
+              test: opensuse15py2
+            - name: openSUSE 15 py3
+              test: opensuse15
+            - name: Ubuntu 16.04
+              test: ubuntu1604
+            - name: Ubuntu 18.04
+              test: ubuntu1804
+          groups:
+            - 4
+            - 5
+### Remote
+  - stage: Remote_devel
+    displayName: Remote devel
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: RHEL {0}
+          testFormat: devel/rhel/{0}
+          targets:
+            - test: 7.8
+            - test: 8.2
+          groups:
+            - 1
+            - 2
+            - 3
+            - 4
+            - 5
+  - stage: Remote_2_10
+    displayName: Remote 2.10
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: RHEL {0}
+          testFormat: 2.10/rhel/{0}
+          targets:
+            - test: '8.2'
+          groups:
+            - 1
+            - 2
+            - 3
+            - 4
+  - stage: Remote_2_9
+    displayName: Remote 2.9
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: RHEL {0}
+          testFormat: 2.9/rhel/{0}
+          targets:
+            - test: '8.2'
+          groups:
+            - 1
+            - 2
+            - 3
+            - 4
+
+### Finally
+
+  - stage: Summary
+    condition: succeededOrFailed()
+    dependsOn:
+      - Ansible_devel
+      - Ansible_2_10
+      - Ansible_2_9
+      - Remote_devel
+      - Docker_devel
+      - Remote_2_10
+      - Docker_2_10
+      - Remote_2_9
+      - Docker_2_9
+    jobs:
+      - template: templates/coverage.yml
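Each stage above hands its targets (and optional groups) to templates/matrix.yml, shown further below. As a rough illustration of the expansion that template performs, the following standalone Python sketch (target and group values local to this example) shows how a testFormat such as devel/linux/{0} becomes the test identifiers passed to run-tests.sh:

    # Illustrative sketch only: mirrors the target/group expansion done by
    # templates/matrix.yml; not part of the committed pipeline.
    test_format = "devel/linux/{0}"
    targets = ["centos7", "centos8", "fedora32"]
    groups = [4, 5]

    for group in groups:
        for target in targets:
            # e.g. "devel/linux/centos7/4" -- the value handed to run-tests.sh
            print(test_format.format(target) + "/{0}".format(group))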
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh
new file mode 100755
index 00000000..f3113dd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Aggregate code coverage results for later processing.
+
+set -o pipefail -eu
+
+agent_temp_directory="$1"
+
+PATH="${PWD}/bin:${PATH}"
+
+mkdir "${agent_temp_directory}/coverage/"
+
+options=(--venv --venv-system-site-packages --color -v)
+
+ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"
+
+if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
+    # Only analyze coverage if the installed version of ansible-test supports it.
+    # Doing so allows this script to work unmodified for multiple Ansible versions.
+    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
+fi
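The if guard above is a capability probe: invoke a sub-command's --help and use the feature only when that exits successfully. A minimal Python sketch of the same pattern (the probed command is illustrative):

    # Minimal sketch of the capability-probe pattern used above; assumes the
    # probed command is on PATH, and the command name is illustrative.
    import subprocess

    def supports(args):
        """Return True if the given command exits 0 for --help."""
        try:
            result = subprocess.run(
                args + ["--help"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
        except FileNotFoundError:
            return False
        return result.returncode == 0

    if supports(["ansible-test", "coverage", "analyze", "targets", "generate"]):
        print("coverage analyze supported")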
+""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re +import shutil +import sys + + +def main(): + """Main program entry point.""" + source_directory = sys.argv[1] + + if '/ansible_collections/' in os.getcwd(): + output_path = "tests/output" + else: + output_path = "test/results" + + destination_directory = os.path.join(output_path, 'coverage') + + if not os.path.exists(destination_directory): + os.makedirs(destination_directory) + + jobs = {} + count = 0 + + for name in os.listdir(source_directory): + match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name) + label = match.group('label') + attempt = int(match.group('attempt')) + jobs[label] = max(attempt, jobs.get(label, 0)) + + for label, attempt in jobs.items(): + name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt) + source = os.path.join(source_directory, name) + source_files = os.listdir(source) + + for source_file in source_files: + source_path = os.path.join(source, source_file) + destination_path = os.path.join(destination_directory, source_file + '.' + label) + print('"%s" -> "%s"' % (source_path, destination_path)) + shutil.copyfile(source_path, destination_path) + count += 1 + + print('Coverage file count: %d' % count) + print('##vso[task.setVariable variable=coverageFileCount]%d' % count) + print('##vso[task.setVariable variable=outputPath]%s' % output_path) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh new file mode 100755 index 00000000..f3f1d1ba --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Check the test results and set variables for use in later steps. + +set -o pipefail -eu + +if [[ "$PWD" =~ /ansible_collections/ ]]; then + output_path="tests/output" +else + output_path="test/results" +fi + +echo "##vso[task.setVariable variable=outputPath]${output_path}" + +if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then + echo "##vso[task.setVariable variable=haveTestResults]true" +fi + +if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then + echo "##vso[task.setVariable variable=haveBotResults]true" +fi + +if compgen -G "${output_path}"'/coverage/*' > /dev/null; then + echo "##vso[task.setVariable variable=haveCoverageData]true" +fi diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.sh new file mode 100755 index 00000000..7aeabda0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Upload code coverage reports to codecov.io. +# Multiple coverage files from multiple languages are accepted and aggregated after upload. +# Python coverage, as well as PowerShell and Python stubs can all be uploaded. 
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh
new file mode 100755
index 00000000..f3f1d1ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Check the test results and set variables for use in later steps.
+
+set -o pipefail -eu
+
+if [[ "$PWD" =~ /ansible_collections/ ]]; then
+    output_path="tests/output"
+else
+    output_path="test/results"
+fi
+
+echo "##vso[task.setVariable variable=outputPath]${output_path}"
+
+if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
+    echo "##vso[task.setVariable variable=haveTestResults]true"
+fi
+
+if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
+    echo "##vso[task.setVariable variable=haveBotResults]true"
+fi
+
+if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
+    echo "##vso[task.setVariable variable=haveCoverageData]true"
+fi
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.sh
new file mode 100755
index 00000000..7aeabda0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Upload code coverage reports to codecov.io.
+# Multiple coverage files from multiple languages are accepted and aggregated after upload.
+# Python coverage, as well as PowerShell and Python stubs, can all be uploaded.
+
+set -o pipefail -eu
+
+output_path="$1"
+
+curl --silent --show-error https://codecov.io/bash > codecov.sh
+
+for file in "${output_path}"/reports/coverage*.xml; do
+    name="${file}"
+    name="${name##*/}"        # remove path
+    name="${name##coverage=}" # remove 'coverage=' prefix if present
+    name="${name%.xml}"       # remove '.xml' suffix
+
+    bash codecov.sh \
+        -f "${file}" \
+        -n "${name}" \
+        -X coveragepy \
+        -X gcov \
+        -X fix \
+        -X search \
+        -X xcode \
+        || echo "Failed to upload code coverage report to codecov.io: ${file}"
+done
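The three parameter expansions above derive the report name from the file path. The same derivation in Python, for readers less fluent in bash's ${name##...} syntax (the path is an example):

    # Python equivalent of the bash name derivation above; the path is an example.
    file = "tests/output/reports/coverage=integration.xml"

    name = file.rsplit("/", 1)[-1]          # remove path   -> "coverage=integration.xml"
    if name.startswith("coverage="):
        name = name[len("coverage="):]      # remove prefix -> "integration.xml"
    if name.endswith(".xml"):
        name = name[:-len(".xml")]          # remove suffix -> "integration"

    print(name)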
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh
new file mode 100755
index 00000000..1bd91bdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
+
+set -o pipefail -eu
+
+PATH="${PWD}/bin:${PATH}"
+
+if ! ansible-test --help >/dev/null 2>&1; then
+    # Install the devel version of ansible-test for generating code coverage reports.
+    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
+    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
+    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+fi
+
+ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh
new file mode 100755
index 00000000..a947fdf0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Configure the test environment and run the tests.
+
+set -o pipefail -eu
+
+entry_point="$1"
+test="$2"
+read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds
+
+export COMMIT_MESSAGE
+export COMPLETE
+export COVERAGE
+export IS_PULL_REQUEST
+
+if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
+    IS_PULL_REQUEST=true
+    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
+else
+    IS_PULL_REQUEST=
+    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
+fi
+
+COMPLETE=
+COVERAGE=
+
+if [ "${BUILD_REASON}" = "Schedule" ]; then
+    COMPLETE=yes
+
+    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
+        COVERAGE=yes
+    fi
+fi
+
+"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
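The scheduled-build logic above enables full runs on every nightly build, and coverage only when the source branch appears in the coverageBranches variable. The same gating, sketched in Python with illustrative inputs:

    # Sketch of the scheduled-build gating above; inputs are illustrative.
    build_reason = "Schedule"
    source_branch = "main"
    coverage_branches = "main".split()  # value of the coverageBranches variable

    complete = build_reason == "Schedule"
    coverage = complete and source_branch in coverage_branches

    print(complete, coverage)  # True True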
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/time-command.py b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/time-command.py
new file mode 100755
index 00000000..5e8eb8d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/scripts/time-command.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+
+def main():
+    """Main program entry point."""
+    start = time.time()
+
+    sys.stdin.reconfigure(errors='surrogateescape')
+    sys.stdout.reconfigure(errors='surrogateescape')
+
+    for line in sys.stdin:
+        seconds = time.time() - start
+        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+        sys.stdout.flush()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml
new file mode 100644
index 00000000..1864e444
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml
@@ -0,0 +1,39 @@
+# This template adds a job for processing code coverage data.
+# It will upload results to Azure Pipelines and codecov.io.
+# Use it from a job stage that completes after all other jobs have completed.
+# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
+
+jobs:
+  - job: Coverage
+    displayName: Code Coverage
+    container: default
+    workspace:
+      clean: all
+    steps:
+      - checkout: self
+        fetchDepth: $(fetchDepth)
+        path: $(checkoutPath)
+      - task: DownloadPipelineArtifact@2
+        displayName: Download Coverage Data
+        inputs:
+          path: coverage/
+          patterns: "Coverage */*=coverage.combined"
+      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
+        displayName: Combine Coverage Data
+      - bash: .azure-pipelines/scripts/report-coverage.sh
+        displayName: Generate Coverage Report
+        condition: gt(variables.coverageFileCount, 0)
+      - task: PublishCodeCoverageResults@1
+        inputs:
+          codeCoverageTool: Cobertura
+          # Azure Pipelines only accepts a single coverage data file.
+          # That means only Python or PowerShell coverage can be uploaded, but not both.
+          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
+          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
+          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
+        displayName: Publish to Azure Pipelines
+        condition: gt(variables.coverageFileCount, 0)
+      - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
+        displayName: Publish to codecov.io
+        condition: gt(variables.coverageFileCount, 0)
+        continueOnError: true
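The gt(variables.coverageFileCount, 0) conditions above rely on combine-coverage.py exporting that variable through an Azure Pipelines logging command. Any script step can do the same by printing a ##vso line on stdout:

    # How a script step sets the pipeline variable the conditions above read.
    # Same mechanism combine-coverage.py uses; the count here is an example.
    count = 3
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)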
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml
new file mode 100644
index 00000000..4e9555dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml
@@ -0,0 +1,55 @@
+# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
+# If this matrix template does not provide the required functionality, consider using the test template directly instead.
+
+parameters:
+  # A required list of dictionaries, one per test target.
+  # Each item in the list must contain a "test" or "name" key.
+  # Both may be provided. If one is omitted, the other will be used.
+  - name: targets
+    type: object
+
+  # An optional list of values which will be used to multiply the targets list into a matrix.
+  # Values can be strings or numbers.
+  - name: groups
+    type: object
+    default: []
+
+  # An optional format string used to generate the job name.
+  # - {0} is the name of an item in the targets list.
+  - name: nameFormat
+    type: string
+    default: "{0}"
+
+  # An optional format string used to generate the test name.
+  # - {0} is the name of an item in the targets list.
+  - name: testFormat
+    type: string
+    default: "{0}"
+
+  # An optional format string used to add the group to the job name.
+  # {0} is the formatted name of an item in the targets list.
+  # {{1}} is the group -- be sure to include the double "{{" and "}}".
+  - name: nameGroupFormat
+    type: string
+    default: "{0} - {{1}}"
+
+  # An optional format string used to add the group to the test name.
+  # {0} is the formatted test of an item in the targets list.
+  # {{1}} is the group -- be sure to include the double "{{" and "}}".
+  - name: testGroupFormat
+    type: string
+    default: "{0}/{{1}}"
+
+jobs:
+  - template: test.yml
+    parameters:
+      jobs:
+        - ${{ if eq(length(parameters.groups), 0) }}:
+            - ${{ each target in parameters.targets }}:
+                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+        - ${{ if not(eq(length(parameters.groups), 0)) }}:
+            - ${{ each group in parameters.groups }}:
+                - ${{ each target in parameters.targets }}:
+                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
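The doubled braces in nameGroupFormat and testGroupFormat survive the first format() pass and are consumed by the second, which is what lets the group be appended to an already-formatted name. The same two-stage formatting, demonstrated in Python with the template's default values:

    # Two-stage formatting used by nameGroupFormat/testGroupFormat above.
    name_group_format = "{0} - {{1}}"   # default nameGroupFormat
    name_format = "RHEL {0}"            # a nameFormat from azure-pipelines.yml

    combined = name_group_format.format(name_format)  # -> "RHEL {0} - {1}"
    print(combined.format("8.2", 1))                  # -> "RHEL 8.2 - 1"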
diff --git a/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/test.yml b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/test.yml
new file mode 100644
index 00000000..5250ed80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/.azure-pipelines/templates/test.yml
@@ -0,0 +1,45 @@
+# This template uses the provided list of jobs to create one or more test jobs.
+# It can be used directly if needed, or through the matrix template.
+
+parameters:
+  # A required list of dictionaries, one per test job.
+  # Each item in the list must contain a "test" and "name" key.
+  - name: jobs
+    type: object
+
+jobs:
+  - ${{ each job in parameters.jobs }}:
+      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+        displayName: ${{ job.name }}
+        container: default
+        workspace:
+          clean: all
+        steps:
+          - checkout: self
+            fetchDepth: $(fetchDepth)
+            path: $(checkoutPath)
+          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+            displayName: Run Tests
+          - bash: .azure-pipelines/scripts/process-results.sh
+            condition: succeededOrFailed()
+            displayName: Process Results
+          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+            condition: eq(variables.haveCoverageData, 'true')
+            displayName: Aggregate Coverage Data
+          - task: PublishTestResults@2
+            condition: eq(variables.haveTestResults, 'true')
+            inputs:
+              testResultsFiles: "$(outputPath)/junit/*.xml"
+            displayName: Publish Test Results
+          - task: PublishPipelineArtifact@1
+            condition: eq(variables.haveBotResults, 'true')
+            displayName: Publish Bot Results
+            inputs:
+              targetPath: "$(outputPath)/bot/"
+              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+          - task: PublishPipelineArtifact@1
+            condition: eq(variables.haveCoverageData, 'true')
+            displayName: Publish Coverage Data
+            inputs:
+              targetPath: "$(Agent.TempDirectory)/coverage/"
+              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
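The nested replace() calls above turn a test path into a usable job identifier, since Azure Pipelines job names are restricted to letters, digits, and underscores. The same sanitization in Python (the test value is an example):

    # Job-ID sanitization performed by the nested replace() calls above.
    test = "2.10/sanity/1"
    job_id = "test_" + test.replace("/", "_").replace(".", "_").replace("-", "_")
    print(job_id)  # -> "test_2_10_sanity_1"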
diff --git a/collections-debian-merged/ansible_collections/community/docker/CHANGELOG.rst b/collections-debian-merged/ansible_collections/community/docker/CHANGELOG.rst
new file mode 100644
index 00000000..2a2b18f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/CHANGELOG.rst
@@ -0,0 +1,172 @@
+=========================================
+Docker Community Collection Release Notes
+=========================================
+
+.. contents:: Topics
+
+
+v1.2.2
+======
+
+Release Summary
+---------------
+
+Security bugfix release to address CVE-2021-20191.
+
+Security Fixes
+--------------
+
+- docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.docker/pull/80).
+
+v1.2.1
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker connection plugin - fix Docker version parsing, as some docker versions have a leading ``v`` in the output of the command ``docker version --format "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
+
+v1.2.0
+======
+
+Release Summary
+---------------
+
+Feature release with one new feature and two bugfixes.
+
+Minor Changes
+-------------
+
+- docker_container - added the ``default_host_ip`` option, which allows explicitly setting the default IP string for published ports without explicitly specified IPs. When using IPv6 binds with Docker 20.10.2 or newer, this needs to be set to an empty string (``""``) (https://github.com/ansible-collections/community.docker/issues/70, https://github.com/ansible-collections/community.docker/pull/71).
+
+Bugfixes
+--------
+
+- docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
+- docker_image - fix crash on loading images with versions of Docker SDK for Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72, https://github.com/ansible-collections/community.docker/pull/73).
+
+v1.1.0
+======
+
+Release Summary
+---------------
+
+Feature release with three new plugins and modules.
+
+Minor Changes
+-------------
+
+- docker_container - support specifying ``cgroup_parent`` (https://github.com/ansible-collections/community.docker/issues/6, https://github.com/ansible-collections/community.docker/pull/59).
+- docker_container - when a container is started with ``detached=false``, ``status`` is now also returned when it is 0 (https://github.com/ansible-collections/community.docker/issues/26, https://github.com/ansible-collections/community.docker/pull/58).
+- docker_image - support ``platform`` when building images (https://github.com/ansible-collections/community.docker/issues/22, https://github.com/ansible-collections/community.docker/pull/54).
+
+Deprecated Features
+-------------------
+
+- docker_container - currently ``published_ports`` can contain port mappings next to the special value ``all``, in which case the port mappings are ignored. This behavior is deprecated for community.docker 2.0.0, at which point it will either be forbidden, or this behavior will be properly implemented similar to how the Docker CLI tool handles this (https://github.com/ansible-collections/community.docker/issues/8, https://github.com/ansible-collections/community.docker/pull/60).
+
+Bugfixes
+--------
+
+- docker_image - if ``push=true`` is used with ``repository``, and the image does not need to be tagged, still push. This can happen if ``repository`` and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52, https://github.com/ansible-collections/community.docker/pull/53).
+- docker_image - report error when loading a broken archive that contains no image (https://github.com/ansible-collections/community.docker/issues/46, https://github.com/ansible-collections/community.docker/pull/55).
+- docker_image - report error when the loaded archive does not contain the specified image (https://github.com/ansible-collections/community.docker/issues/41, https://github.com/ansible-collections/community.docker/pull/55).
+
+New Plugins
+-----------
+
+Connection
+~~~~~~~~~~
+
+- docker_api - Run tasks in docker containers
+
+Inventory
+~~~~~~~~~
+
+- docker_containers - Ansible dynamic inventory plugin for Docker containers.
+
+New Modules
+-----------
+
+- current_container_facts - Return facts about whether the module runs in a Docker container
+
+v1.0.1
+======
+
+Release Summary
+---------------
+
+Maintenance release with a bugfix for ``docker_container``.
+
+Bugfixes
+--------
+
+- docker_container - the validation for ``capabilities`` in ``device_requests`` was incorrect (https://github.com/ansible-collections/community.docker/issues/42, https://github.com/ansible-collections/community.docker/pull/43).
+
+v1.0.0
+======
+
+Release Summary
+---------------
+
+This is the first production (non-prerelease) release of ``community.docker``.
+
+
+Minor Changes
+-------------
+
+- Add collection-side support of the ``docker`` action group / module defaults group (https://github.com/ansible-collections/community.docker/pull/17).
+- docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
+- docker_secret - add a warning when the secret does not have an ``ansible_key`` label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30, https://github.com/ansible-collections/community.docker/pull/31).
+
+v0.1.0
+======
+
+Release Summary
+---------------
+
+The ``community.docker`` collection continues the work on the Ansible docker modules and plugins from their state in ``community.general`` 1.2.0. The changes listed here are thus relative to the modules and plugins ``community.general.docker*``.
+
+All deprecation removals planned for ``community.general`` 2.0.0 have been applied. All deprecation removals scheduled for ``community.general`` 3.0.0 have been re-scheduled for ``community.docker`` 2.0.0.
+
+
+Minor Changes
+-------------
+
+- docker_container - now supports the ``device_requests`` option, which allows requesting additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748, https://github.com/ansible-collections/community.general/pull/1119).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- docker_container - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_container - the default of ``networks_cli_compatible`` changed to ``true`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_container - the unused option ``trust_image_content`` has been removed (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - ``state=build`` has been removed. Use ``present`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``container_limits``, ``dockerfile``, ``http_timeout``, ``nocache``, ``rm``, ``path``, ``buildargs``, ``pull`` options have been removed. Use the corresponding suboptions of ``build`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``force`` option has been removed. Use the more specific ``force_*`` options instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``source`` option is now mandatory (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``use_tls`` option has been removed. Use ``tls`` and ``validate_certs`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the default of the ``build.pull`` option changed to ``false`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image_facts - this alias is no longer available, use ``docker_image_info`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_network - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_network - the ``ipam_options`` option has been removed. Use ``ipam_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_service - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm - ``state=inspect`` has been removed. Use ``docker_swarm_info`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``constraints`` option has been removed. Use ``placement.constraints`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``limit_cpu`` and ``limit_memory`` options have been removed. Use the corresponding suboptions in ``limits`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``log_driver`` and ``log_driver_options`` options have been removed. Use the corresponding suboptions in ``logging`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``reserve_cpu`` and ``reserve_memory`` options have been removed. Use the corresponding suboptions in ``reservations`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``restart_policy``, ``restart_policy_attempts``, ``restart_policy_delay`` and ``restart_policy_window`` options have been removed. Use the corresponding suboptions in ``restart_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``update_delay``, ``update_parallelism``, ``update_failure_action``, ``update_monitor``, ``update_max_failure_ratio`` and ``update_order`` options have been removed. Use the corresponding suboptions in ``update_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_volume - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_volume - the ``force`` option has been removed. Use ``recreate`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+
+Bugfixes
+--------
+
+- docker_login - fix internal config file storage to handle credentials for more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
diff --git a/collections-debian-merged/ansible_collections/community/docker/COPYING b/collections-debian-merged/ansible_collections/community/docker/COPYING
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/COPYING
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
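The FILES.json manifest added below records every file in the collection together with its type and a sha256 checksum (fields: name, ftype, chksum_type, chksum_sha256, format; directory entries carry null checksums). As a minimal illustration of how such a manifest can be checked, here is a short standard-library Python sketch; the helper name verify_collection and the root-path argument are hypothetical, and this is only illustrative, independent of whatever verification Ansible's own tooling performs:

    import hashlib
    import json
    import os
    import sys

    def verify_collection(root):
        # FILES.json sits at the collection root; its "files" array holds
        # one entry per path, as in the manifest shown below.
        with open(os.path.join(root, "FILES.json"), "rb") as fh:
            manifest = json.load(fh)
        ok = True
        for entry in manifest["files"]:
            # Directory entries have chksum_type/chksum_sha256 set to null,
            # so only regular files are hashed and compared.
            if entry["ftype"] != "file" or entry["chksum_type"] != "sha256":
                continue
            path = os.path.join(root, entry["name"])
            with open(path, "rb") as fh:
                digest = hashlib.sha256(fh.read()).hexdigest()
            if digest != entry["chksum_sha256"]:
                print("checksum mismatch: %s" % entry["name"])
                ok = False
        return ok

    if __name__ == "__main__":
        # Usage: python verify_files.py <collection root directory>
        sys.exit(0 if verify_collection(sys.argv[1]) else 1)
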
diff --git a/collections-debian-merged/ansible_collections/community/docker/FILES.json b/collections-debian-merged/ansible_collections/community/docker/FILES.json new file mode 100644 index 00000000..917d848c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/FILES.json @@ -0,0 +1,3295 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15d95915d1a62e0fa492ba3fbe276c064fc3d1e673c4baf91924ac40f9631351", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4012af07685df5bf9f84c830c487dc4d2689d05d5aef7f66c1abd86c1af659e9", + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/.keep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ec40702470c37a5e1ab8c1cece24dbe64b8cabd2b97650a17444d0c6d315533", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "867e4077b2e661f834372ab4dc4cdc63693c448d2aa5971fa762474d5cedcbe1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d19d6c3ee524827daacd13c651c57239b83b2470596e73158e7cf378975da439", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/tests/basic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47495bac1e1bd14f4e1c832bc527113acc68707a4b13d85c54fa9b584f799898", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b26da73f25849e4c4a7f74060c0348e68227caeb3c29a6d3d70237844e48f998", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/tests/options.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "397aca2a0b963a65c5d0aa341e543e60be6748882d30ee9d717031ece533def9", + "format": 1 + }, + { + 
"name": "tests/integration/targets/docker_swarm/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce76d01b6fead8139dc4676535e795f4010017248275fba1a0ae9a89ecf34e92", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/tasks/cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6aecdf9b8af5eb8aec7209c2004b2b5dc0578e0d47a0d14d97ae61bc2034c922", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "182df7b6b5261dd330ca1164ab34f506696fcbcefa63d58c85b59eabcb1549da", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10e71a368970eba75f8962b673fe5a37a45b0aee5a30d9f7eff640c92e96f589", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/shutdown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61483a14ee4a57aa26ff4efe8eacf9bdf59ef7a2bed13724e5f96bd2deb5934b", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10e53d7ac39a3f38d1c984eeec08092ebb4a73b40ecc59703293ab6c3a8d2def", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1edcc3c442bc04b7a54ceb9d520de8e9415ad947c81a1125ca59972273a8c704", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/runme-connection.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker/runme.sh", + "ftype": "file", + "chksum_type": "sha256", 
+ "chksum_sha256": "3b2a3ac9cc54555fb5a36b752c5d1bbcec31ca0a83282562403ded8151e71ec5", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/shutdown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61483a14ee4a57aa26ff4efe8eacf9bdf59ef7a2bed13724e5f96bd2deb5934b", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10e53d7ac39a3f38d1c984eeec08092ebb4a73b40ecc59703293ab6c3a8d2def", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1edcc3c442bc04b7a54ceb9d520de8e9415ad947c81a1125ca59972273a8c704", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/runme-connection.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_docker_api/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f0d9fd5de2f2a7314033eb2fc480e2257f0399bc9e96d85937a42243d0ce1c5", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "050157a29c48915cf220b3cdcf5a032e53e359bdc4a210cd457c4836e8e32a4d", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2441ac1753320d2cd3bea299c160540e6ae31739ed235923ca478284d1fcfe09", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcb3221a68dc87c7eae0c7ea50c0c0e81380932bf2e21a3dfdee1acc2266c3f3", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e273324ab90d72180a971d99b9ab69f08689c8be2e6adb991154fc294cf1056e", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_prune", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_prune/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_prune/meta/main.yml", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_prune/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_prune/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b40474475308fc306d7b4a97ce1ce0d1a0f83632cddd67fe005e535fab8f9fa", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_prune/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_pkg_mgr", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_pkg_mgr/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a3ac16c80747874defcb3b8bcd0cf796fb847b440ee5ae240d7b5b71cf32c0e", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_posix", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/connection_posix/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba6bc4b7b7f06e33b61092629dbd2f094b2d814d5cb051650b7494031fba6bea", + "format": 1 + }, + { + "name": "tests/integration/targets/connection_posix/test.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "883cb600f2f7de7fe5afc8aeb8c278eb91585bd3d2313fc9ddf3c1c7b371702e", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node/tasks/test_node.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bee30ea26db86db833a3c73c6d7a923ea3b1aa092535484fda3523887780ddc", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f145ee8bea74b32b2ffc7520483286e946932b73ae831d0e60de9dd0bfa39ebf", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume_info/meta/main.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31d586af76d9684e993af8b2df0f14dfab96be4b0b9c293c42644aa348f9b284", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5ffaf873097545f415605f8439a82357c043c76c11747e0c9fb1b3251a92712", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5209c9f34dce64b0c78257053a99738b8be0b516587ae2c558bcbcff9eebd3e2", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "385d6e8146fca3f00956d073b658a3b0369e3742f8460204ec41207aaf863354", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd40dd4d924aa624c52214c3c5a7eec7dc4b5efd4f87ab5e074452bec6bbb0ac", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01c94e8bf48fc57a710ca0890fd004ed41bcb654b8a75d999a3e5bc3ae91f09c", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "418da6820cde100af6d3b2ec0cb963d57780e49308baf4f4c3424831ea5926e0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3832123345d38dd3ea6da824e33651cc757df4f8d697a4297492db78c2a0a73", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a37c0def8e68890b897d75ba3a0b4c54667e513df86181f2d657dc1c67546e9", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 
1 + }, + { + "name": "tests/integration/targets/docker_login/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "effcd9450b3f039a7c09bf7f5cdf6a296456d9b0b649ed5e4bb3eab045da0081", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d07fd22024a04828d288dcdc599b7abcaa4c6fda395996a195c91c89e24227f8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks/tests/docker_login.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81cedf41ba32f65a6297cdabe78fc67e3cf5152fa0ad3dcad6e2d603db5d05f6", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee890763d871e913cc6880f76bc950cefe3970916dbf472de7fc0b6e72d1e44e", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/tasks/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f2564eeb7cbbf2be8c7aa444c2852098ee2c07d0d710a85c54c5cdfb819bd37", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_login/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf165a51db50a56421594b7fc93f1099d5e9c8114055733519848d35c782941d", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/docker-machine", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e73a5b2c73dd975127cdb8df5182de3cdc648e1b87494ec81e5318e311d77dd1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2b3ce39f63d211c06eddc3a1c6079439ac1e27b146f9fd8148021c0c8eb9fa6", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6da1af57d46baf2fc6416741de9ae91e4d32ef63f84c14cd12e5351ccb066999", + "format": 1 + }, + { + 
"name": "tests/integration/targets/inventory_docker_machine/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c35017ffb24489c0276a884a9be89af7fcfc74fc49e70637a2f5c09bed7d7e46", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32d0c2c3d2c6c3a859d0aee7053921ef489d37f041615b0d474121b2624bf4d4", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f61d2216516f7aa1d8d1777150bd952c854372966c6aff0e2ce6c113851dff7", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/playbooks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72d91645ecdb49c881426a17d8a1af014c3987b35125c0f83ec648b5e8d665df", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecd91df2da2c3f4b54ec835892793b7bb0f738f5f9ec171ceed0f6990fa5a5f7", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e56a2b9e428cdc2082427ee41bf27cbf04d172e83243a4c0041df90208f8ef2d", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8795f2f8f3ad20f5e464bcb2779f52d7ff5844bd47f9be5de3287f69a581287", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af0a23f6387c3d8a125b243420f2030eaba632664c005a3999eb8fb029e39cb8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info/tasks/test_host_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69d43f186ad9eb8eebd52cd95f28fa4f660822fb68338c6c6a5637bee4570890", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_host_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/docker_swarm_service/files/env-file-2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1cfed9ac20bcf4e6325702546862e8347616d8da6bf8714a6facacc4fa4463df", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/files/env-file-1", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50a89c391ed8404416ef6ced670c291e4b9e6ec5f509fbf57dc2b0385912b166", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1983ae0e64d50c206ae101e039c3f711f60d926a187cff50e9bef179eab96d7b", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "401715167deadc80859c34549304fee97eb0a4f25b39fb392e44761184ae06eb", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "171d5378521ae70b120f8c8a6fc6783319ee14d0c8105850dc1932e937d20d5d", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37296c16a8234697444fdb14f0745bea3a8d775eb1940b190531859094cc17f2", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9225b2b34ae34aab73ae66a320d13e60b5437ba9c968517111cf8512c4fa23ac", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c9bd1f62edb22c34cbb58a3be65d98d29cc1ed600c11152edca572adc6ecaa6", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf90f8164f952b47302a1adae64b7134aa66f552203fcb3950e905f8386765a5", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a137b314b63c538656651621cbf4574d43e7d207fa7fae440925da9ed911bcf9", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8ce252c415c957a8148ca9b7aee0d230cdcc4711d976b7e420ad69a99e735f3", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27f74fd187ebef29cae2792ae2dae38e8aba279a5326b9b9e28b0824e634e74f", 
+ "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e16dcd0316cd14ffdf7410610d9e02cac67e939d690d5257be2a6eb205bbbc8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/tests/options.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "162fbfaba5a2b6449aebed5ffcc80fba203b58455102f6b68d070f44cb540fd1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4955832d7961978a22679dfc1b4faa7bae3488a5d71c7400b851dd8a44c6865", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1402f9933e2910bfbc0b6e95def618f1031456778b4f2db102acb93d8fc858e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ca78b97bcbec98bca1d7d08a6a68d12cbe1c2671eb52fbcdd25e89d6124ac31", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "005e4872497ab2f91e82e7e1cf8da7b80027aa2d791528486ee7c932cc939176", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info/tasks/test_node_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c154d0c71f371be9e83cdae504c3584a1508ff642000e9e99b5e46dcab4680d", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_node_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b90daf51c8e2e20dbc336060a54eedab09fd4bec7f4ea86249fdfd01ffe2a3f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16b3c31f92970815706d4d206e0a98cce29e28aedfdec17f803fe8a789462a09", + "format": 1 + }, + { + "name": 
"tests/integration/targets/docker_stack/templates/stack_compose_base.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cdba12ae8999f6f7e44b65aaaa4ffa24221fa957b928eace52f10e37eab1ba6", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f1569368ad85f40153d66eb99b3490afbd88ebfabcfa36059682f81e05186b9", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/tasks/test_stack.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c37ed33f4a92c8e525b83df0f889ea522208bed08f3196a7dfeb3d017b56737", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b90daf51c8e2e20dbc336060a54eedab09fd4bec7f4ea86249fdfd01ffe2a3f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bb9483bbe3c899533e31d8794251fa0eccf41ced205e3169f44ded2b10c7a00", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b22fd2cb0cd07292350ec15f9ada3c9932c589631ac3a7bfbef1634da9bc9e7", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "373215997d27cc14aa1439e68449c48e89c1991ee2860a2d179c4223a6615847", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_constraints", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_constraints/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_constraints/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_constraints/tasks", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_constraints/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b625673c8e7b2a5dced6117b01aec6f25452246117746bd23450dcf389a61883", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_constraints/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b34f7e77b6117dd66a49957c17656b22e5dfa444d8f93af2b4e1d7f1450a5a3d", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16b3c31f92970815706d4d206e0a98cce29e28aedfdec17f803fe8a789462a09", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cdba12ae8999f6f7e44b65aaaa4ffa24221fa957b928eace52f10e37eab1ba6", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48e555c194784189d6bcb0503660b8acaffa853efa2b75de1dd4add4ec3f8e0b", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e0f1cf44831d325c34b056cfc865589a4666c7560da33f68a8ab8248195b2aa", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b90daf51c8e2e20dbc336060a54eedab09fd4bec7f4ea86249fdfd01ffe2a3f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_task_info/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bb9483bbe3c899533e31d8794251fa0eccf41ced205e3169f44ded2b10c7a00", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e", + "format": 1 + }, + { + "name": 
"tests/integration/targets/setup_openssl/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "762f82e1a0ffabc9b944747be33838038191feac371dba39590a442e7123ac36", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/vars/FreeBSD.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11b3e754dbf0a5357c123f9451897f0705315cca84f09ad232ac452068f1c931", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/vars/Suse.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35cea19724e586049bd529659f11465d2be9a74956a2e36d39297e7a937dea51", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/vars/Debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35cea19724e586049bd529659f11465d2be9a74956a2e36d39297e7a937dea51", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_openssl/vars/RedHat.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35cea19724e586049bd529659f11465d2be9a74956a2e36d39297e7a937dea51", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42055be7b82fff08003714b40008d4212ae532822bb836eec59388b36c92f082", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f145ee8bea74b32b2ffc7520483286e946932b73ae831d0e60de9dd0bfa39ebf", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51490300a5521e905d783638d4bdce403be57c74c8622d5f20d78df5c4e70a07", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "048d09306680a238ebe8fb6c65cc4d9a8ed198be06e6bd8bb355728316481255", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb2b94163ae09c00bfd545b7284abf64e938f7cc2cfd5bcfb38988bb45cdc7db", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3eb10ff9abef371cc1b14f9c28864977a0d1713fc23336147c6d4aa39ee03c6a", + "format": 1 + }, + { + 
"name": "tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "837e035480fdac759eb72245a58dbace266ffbf52cc8cb11ff42b40bf2a51122", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55a7dfbef9d4685d27847a700dcbba6edbdb6034d756c12b172022687687e484", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ebde042b81fed14a9bf1a133f42e3ba5167e0d7adb521300bac2dcd21bfb7a9", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f5c28e0fee2e5a47dd3e2e6dcb02b206d2996e8cd55b56e727fadf305cb9e2e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks/Suse.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "617bed76a6bc1cae5b132387f4170101b65f6372364333450936c57b9a999c3b", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks/RedHat-8.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aade35ced464abab994e32183ebc792e44429a4200ae0f918e10268af3267f04", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d947739a9f927879c50d57be5a8de62f8b7b74757adb581cdc9e746ead7ba1c6", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks/RedHat-7.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08a566f60100d35b6fb615328cffe6fd393ebd6d6dfb9f9f931215757045ce82", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks/Fedora.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b28f7c0c7f2fe7bf214a2fe7a69f84e70bdf5e1fbb36d42f09aaf488c96b0f40", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/tasks/Debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2dd5fcbafd317b27c144bcfae36925a0eca48acbd1059acb31835b5b84bb26fa", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e00b460f6ad8bd7ddc6ff35a7c4037c79748a7e5dbbe4dcbb23fceb11245d0e3", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/Suse.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ba763ddb6508c4841b1c75195618d10cd66de0477e270b2922ed8559ea060a1", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/RedHat-8.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79bae46283e6502a6937ffd87991d4d50ef3d4767eb518ee38d1dc1be1ea03ea", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/default.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c0c4fe950c7c1a4bccd60f47ad339c0083f5d0a5c0d87fa6325d0fe93a132a0", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/main.env", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3120d28f73a290e24b9bfa934ddca94c04f118e7372d6cecd7a6b1934f8cc720", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/RedHat-7.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d35bd8e8d2f6a7ce41a5a507b89c9d7b24cf29b04e7c8e300c90256e01341d9", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/Fedora.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/Ubuntu-14.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb0dc8a2014d527eaf8c4ccdf422b742cc077effe4a65b2a8e56783419248898", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker/vars/Debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5903fbb4deb30763277c98771e2f3f299940eb9518688d3031257712d00aee1", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_epel", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_epel/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_epel/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4013a5ad79e7944851ff6a5be0d25dbb2e4354aa6be08e3c435d7707e1d8576c", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b29795a2716d68ba7f457b53a7b7ded35c109dd6e2cc6205a8f0114108eaeb71", + "format": 1 + 
}, + { + "name": "tests/integration/targets/docker_swarm_service_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff1e241dd69d370b0e2e74c5d693e13952b88368ff780fee159ada4a4adda27d", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_service_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1402f9933e2910bfbc0b6e95def618f1031456778b4f2db102acb93d8fc858e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/tests/substring.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b829b6330954ea3eac30b554f1a884c2199898d5046b49ddeb54ba00cedc6142", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/tests/ipam.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a1e18b4292a54341a2f950a0744772b914026aad4d1f41c3b0e8e994fa4f45b", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/tests/basic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5c8ba85673aa1da939d015987f61cc4adea700f2c97990fbdfcc54cd464b52a", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/tests/options.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "996f3c0e078165a96d3050aee092ebe6361ffad83537efa1dbd3dd25aceeac79", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/tests/overlay.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1bc0ba9f389dcd122006eda7892d68a3365bf2d97a6c1d4eebfc7f34c7d934f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9bcaff2d64557d25e56a99ddf28d196287ab4e1319a2d5421b44c20903f83ad", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/files/env-file", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "50a89c391ed8404416ef6ced670c291e4b9e6ec5f509fbf57dc2b0385912b166", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d24a02b3c81b7c347a224f7e5ad0e701c25b721710aabc9fa9c66a70787ae369", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/ports.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8d7d001854b717cd6b257d21099b63697d020e91a7af1ded48ff2968efb94de", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/network.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fbe53e0349db894edebdeb1cc364d65d5d2d1fae3b0ac31b7ae6c56d6d1cec15", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/start-stop.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b5539326cf9097cd6b3e0486cebac31ee1dcf086689c14095201dec9e3c5d16", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfda3813acfaa7d6f3f1d6ad69cfd2a0cb4bc8eeb923fc81f813d9b8d32421ee", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/comparisons.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb4c255a5eee97f4c75c33248a2e66f5d9399b2739ad87ffc39f9a1b7b826de9", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/compatibility.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a49c8747c60ea755405456d1e6483402853a9fb34d0d46151d5d951ef8a3fddc", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/image-ids.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9d5939abbcd79ca3491484b030b0a8f5d744b2f700aa6903c16d443727f7384", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/tests/options.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c48f48e6dfa5af6f7bf94bfec4972d37b9baa87d65e2f7faed6e237a69e681ef", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fb911d95c5f2e8071534edcca10bca148d74fcb05ca0f307ffbd60612d9b5f3", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"373215997d27cc14aa1439e68449c48e89c1991ee2860a2d179c4223a6615847", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/filter_plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c3de6a0d727eb85ab3d9c066231c4f73ac1a1ecb147f235df97c8cdfa9343d9", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2355f9d04e7057f8e9ae5ec535d2da6d58c4ac171d276a0c74fa9fb4b581a43f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d6ef035de264dc97206b79d51da4d5f4406196e024f46b0ee91cca44274882e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_swarm_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08a5ac67dbbde608b42f081135f93a1f3462537e953a9725662c7a7a368c5536", + "format": 1 + }, + { + "name": "tests/integration/targets/connection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/connection/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e084a3683ef795d1cdbf5e9b253f2ca1f783ae0d0d6e47e419acbbc4fc80bbfa", + "format": 1 + }, + { + "name": "tests/integration/targets/connection/test_connection.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3297fe2040e5b0c523fd6f14bc0c56a886980c2a1b241b93bcce847958528861", + "format": 1 + }, + { + "name": "tests/integration/targets/connection/test.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f32dbff55de60ace66e2555586b94abd0f74f6bbcc008eb8d1c25dbfcc464a3e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/files/nginx.conf", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f7eaed3c6f5778062768d3cc33452679378888e5aa5fbad1914a8aeb6c4acbd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/files/nginx.htpasswd", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cade308eee36a0197cd4949387d8646aae59fe1b66b34242cafb2bbfdeef6706", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/handlers", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6eeb58604285e5dc380dff4451d7705f17c8bc7c09958be560d20ac738885e60", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/handlers/cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2217b169f755cc0a531d1fa8b30aa61aa5b978709ff039f8d4ac404a91cd7e6", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97c3c36f2bd05e2ffd694efc2a84dcc02b2f1912bcdd48ead759c1044212091a", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8f833cbb1ea2e575d7708e5b4535beb8e4a66519e49e475d3c367d730f297d7", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/tasks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd1c2f77957da0e4a3da3175d30f018fcea93581e72b1d9f1d3e3c54ada63a90", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c033292d39cd16d8eb02e73de0b935cc05a719866fb111268b870a3c05df11f0", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe80079c35cf640ed58583d338090127a92e0c920eb2e58c1d692fa6dcf66a77", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_docker_registry/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c0c4fe950c7c1a4bccd60f47ad339c0083f5d0a5c0d87fa6325d0fe93a132a0", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/templates/MyDockerfile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec99d784de7d6f04f25b7a5585a15df27bd7af4127cce9efdd629c0980b1dffe", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/templates/EtcHostsDockerfile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e75d2a307018b4c24faadb616f679f2873551369ded52ec838b12e43c8c0d0c", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/templates/Dockerfile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71bbe9d5a3d7096ac98dace9e355b7d30ed00939355fbc8febad75cce9c1c802", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/templates/StagedDockerfile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f85dfa61ffd1cfd05bd5af9edf55326812150527156e1025a3edb5c72a0636f1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "effcd9450b3f039a7c09bf7f5cdf6a296456d9b0b649ed5e4bb3eab045da0081", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/tests/docker_image.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a78280245492cfaf6b556f0e299cdb853680528421a3fde3f9b55813956d1f2d", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/tests/basic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21eb803420b89ead65f5cfbcf71b72d8265e2baeabc4843bc745ebfd904613ce", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/tests/options.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dade5a470f52848cd321023cc2c1593a5f3e16aec9cdd19900f563298d79dd8c", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e26e03bbf9fa23fc684d7ae5e9ee90a0fd081c2cadf5ed666c039c4dfe25e90b", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/tasks/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ff27f27589585c9348b29d8e6d10f54cccdee1a30c26f8bc793ed7f525f493b", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_image/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3395c3c99e19af68880f2e803328611cf85ea5da46e680cadfce588f6c47410", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret/tasks/test_secrets.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b56c6357e1011252a6710cd4aceaa217d4fb6caed8209ce332cd9515f086affc", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_secret/aliases", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "1402f9933e2910bfbc0b6e95def618f1031456778b4f2db102acb93d8fc858e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed708322aa6495a9f796c6e69b147861a18d54b85393b33715d24b5820bdb434", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config/tasks/test_docker_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fc563ad7c63d710b97668eea1c7ba38fc4fcfafc97b2a1c9509f5380bd5da98", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_config/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1402f9933e2910bfbc0b6e95def618f1031456778b4f2db102acb93d8fc858e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/tasks/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/tasks/tests/basic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f9980eb3289036f29aaa85cdfc51b711ab19903149fb98eeecdbe72ef603a53", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d32809876f7c309d60ed16725e245db8efa6034c980a101373a13b9beeee13a", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/tasks/run-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_volume/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/docker_network_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6459cefa9faaad2e3f255e58ed076406e1e8e11a3dec495943e5fba52ee8b737", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_network_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be973d549544265688017815cac90b8dc5ce9fb938914cd5f1a68944144094d8", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16b3c31f92970815706d4d206e0a98cce29e28aedfdec17f803fe8a789462a09", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cdba12ae8999f6f7e44b65aaaa4ffa24221fa957b928eace52f10e37eab1ba6", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2061a2bff39efefe2e0761bd95b22bc84e389e2cb6b99b06f14ac4d6ef25af9b", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76cdd5b6e08bc25eeb204177a2d98bf0918696f8a78c580d923c1cbbe74b5467", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b90daf51c8e2e20dbc336060a54eedab09fd4bec7f4ea86249fdfd01ffe2a3f", + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/docker_stack_info/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bb9483bbe3c899533e31d8794251fa0eccf41ced205e3169f44ded2b10c7a00", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c513e9b8cc782de958998de1754c2ab63a8e9edc7f0bd132bd5d5ec75dd93478", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_docker_swarm_service.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31917a2516e8e3f4fbcb8970fad4a8b6a106a5c10ce749f48b7bf566b16958ff", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_docker_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34f1207a528a452f075e64ba31ae635dece97c795980bdba6e0935f58803eb16", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/conftest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "909818cefd5093894a41494d1e43bd625538f57821375a564c52fe4219960967", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_docker_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d070319c695be362387074762d5b6a05a99c0bf76edff657ffa1fe3d76a62dd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_docker_container.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "103cadea7a502f45025aab6e6be8cd20d28a2613e0650b801e2ac7146e8dbfc8", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory/test_docker_containers.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bba1572e723be429b5f64e8abe7dd5f5a18518f5aad523c9a796bda2b064cd1c", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9db4cd788682b3056a9cf3eab61c1232fe8c89d90fc6862b6755e230e51b78a", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/connection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/connection/test_docker.py", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35b8c35f9d05510c2ab31d3cb481dec92ec73d64df35445c5868e2fa6669ea25", + "format": 1 + }, + { + "name": "tests/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4b7027b50ae2a1ab175b69cdafee18e9f67ed4ce5100b86147097e10645b25b", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f70054b23d72127f5c72900095c7ac8c0fba03cfa76785d0489079f58b2a3d28", + "format": 1 + }, + { + "name": "tests/sanity/extra", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/extra/no-unwanted-files.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac", + "format": 1 + }, + { + "name": "tests/sanity/extra/no-unwanted-files.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1468e7b22ba353d18fcf2f5b18607873f792de629f887798f081eb6e2cd54fc", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f70054b23d72127f5c72900095c7ac8c0fba03cfa76785d0489079f58b2a3d28", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f70054b23d72127f5c72900095c7ac8c0fba03cfa76785d0489079f58b2a3d28", + "format": 1 + }, + { + "name": "tests/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/shippable", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/shippable/timing.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebb7d3553349747ad41d80899ed353e13cf32fcbecbb6566cf36e9d2bc33703e", + "format": 1 + }, + { + "name": "tests/utils/shippable/units.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a712977b416e5b93325b40d0bf855e4817e597076552f79777aeca8d2fa192bd", + "format": 1 + }, + { + "name": "tests/utils/shippable/rhel.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92", + "format": 1 + }, + { + "name": "tests/utils/shippable/shippable.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "940f631444c7c2d67710d8e2582a97a4286da2640949c599cf64de1c74a493cb", + "format": 1 + }, + { + "name": "tests/utils/shippable/linux.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07aa5e07a0b732a671bf9fdadfe073dd310b81857b897328ce2fa829e2c76315", + "format": 1 + }, + { + "name": "tests/utils/shippable/remote.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92", + "format": 1 + }, + { + "name": "tests/utils/shippable/check_matrix.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e975d3b968d6ec52ca487639624cde5971469996076df16e3e5a1f872865387f", + "format": 1 + }, + { + "name": "tests/utils/shippable/cloud.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dd953f7e779b9962e76492c389142e03174e84a8115f53e56628e2af9e66b818", + "format": 1 + }, + { + "name": "tests/utils/shippable/sanity.sh", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "2cacc1d02f832de1bffadc81cbd4f178caef4b93f2f92757f408d5686aa001a9", + "format": 1 + }, + { + "name": "tests/utils/shippable/timing.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3f3cc03a997cdba719b0542fe668fc612451841cbe840ab36865f30aa54a1bd", + "format": 1 + }, + { + "name": "tests/utils/constraints.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5beb3383ef5546038c2c00ea8e5f438e607d91828ce2259594fd8fbaea003ec9", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": ".azure-pipelines", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".azure-pipelines/azure-pipelines.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9d86aace96a4bcd3e2bec62afa61c36ddba84b30f3a7f05a77d2d6bec4c92ed", + "format": 1 + }, + { + "name": ".azure-pipelines/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".azure-pipelines/templates/matrix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fb0d3ffb2125d5806c7597e4f9d4b2af69cf8c337e9d57803081eddd4a6b081", + "format": 1 + }, + { + "name": ".azure-pipelines/templates/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "daf1930264760d47b54588f05c6339fd69ca2d239c77c44bc4cee3c4e9f76447", + "format": 1 + }, + { + "name": ".azure-pipelines/templates/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cfa1271f94c71f05ffa0b1f763d8946394b5636e14579cda8ee14bb38bbcf1c", + "format": 1 + }, + { + "name": ".azure-pipelines/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61f20decd3c8fb34ac2cc6ff79f598fc5136e642130a7ba065ccc5aa37960cd2", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/run-tests.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb08a3ec5715b00d476ae6d63ca22e11a9ad8887239439937d2a7ea342e5a623", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/time-command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/report-coverage.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6a373322759ccc2736fb25d25d8c402dfe16b5d9a57cfccb1ca8cb136e09663", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/publish-codecov.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2662ee5039b851fd66c069ff6704024951ced29fa04bf1e2df5b75f18fc2a32b", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/combine-coverage.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/aggregate-coverage.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "820353ffde6fd3ad655118772547549d84ccf0a7ba951e8fb1325f912ef640a0", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/process-results.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c03d7273fe58882a439b6723e92ab89f1e127772b5ce35aa67c546dd62659741", + 
"format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aba3b6331e22cd338bb955af5af37cb4a3e0685b4d45e61b76a37a651377e8e5", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ca8d8d439d4f16f766ca561090f829ccffbed3d059ba8cbef767f3713b6c6f1", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/docker_swarm_service.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5fd2a4b3de6c1081fbd8cc4ead26ea46bbf8c5cc6d338aef8e0b2dd231321490", + "format": 1 + }, + { + "name": "plugins/modules/docker_swarm_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "107822f7d083a090dd3e22bc2a1193196205992ddab52a1ddcb315fe61d14026", + "format": 1 + }, + { + "name": "plugins/modules/docker_container_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a569bc9a8ec2a171a60967af29bc05b3f98106f4b96073b393bed21b56264bb", + "format": 1 + }, + { + "name": "plugins/modules/docker_network_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b5bcf21a21f4228943dbf3455d7d022043913a82da41201ebb40c7cfece42c8", + "format": 1 + }, + { + "name": "plugins/modules/docker_image_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91cd4c6972b6e8c9b646de0ae8dcb96a71320f8dc89973a285ad7e0dceb41055", + "format": 1 + }, + { + "name": "plugins/modules/docker_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d411376aa6691c071e69a9505bd1cbc09d7cc9f0398d6bcd737f74e9a171fa1", + "format": 1 + }, + { + "name": "plugins/modules/current_container_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "468fa38d9a7da4be7cd7b18ce647ff19a0b16b3bf54968c1b9bfb950a130f154", + "format": 1 + }, + { + "name": "plugins/modules/docker_swarm_service_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "805d4597fe0eb9de70bc027b656de806d7439ff382891b244202375519ae3f9a", + "format": 1 + }, + { + "name": "plugins/modules/docker_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f477f2eed547591d96dfb4e290ebc14fad8815848afe9a216ec4084ff1979d54", + "format": 1 + }, + { + "name": "plugins/modules/docker_stack_task_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd68eb6c9d026f2cbbc97d429e69582dfc28b3591de767500240084d46ecc155", + "format": 1 + }, + { + "name": "plugins/modules/docker_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a32f7eca14019dbd192575833fb5e1998d197ea9265a88257aae13d49201e7c5", + "format": 1 + }, + { + "name": "plugins/modules/docker_node.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f018730b6e66c874c41193dfcdabf1a5571dce7c658911dcc310f109aa68e65f", + "format": 1 + }, + { + "name": "plugins/modules/docker_compose.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "348d9e1991da8157588c3f155fbdedf5b0c53e14577cab0a69e865c53edd7b44", + "format": 1 + }, + { + "name": "plugins/modules/docker_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"51357730ec575606e55fbf557a547e05976cabffd94d8741adc48a8f1b26074b", + "format": 1 + }, + { + "name": "plugins/modules/docker_prune.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7152dfbe382e28dc74cf8616938d5e2ca71cee3ad435a00217556a6d9de2239", + "format": 1 + }, + { + "name": "plugins/modules/docker_login.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fa11795c544dfbe50b745fcf5c82710f0e01aa0a93a74e2cdaaa8880773c6da", + "format": 1 + }, + { + "name": "plugins/modules/docker_host_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "878b0d311ef10088a65d1a18515016724826cb22d34046fb464461469a0cf295", + "format": 1 + }, + { + "name": "plugins/modules/docker_secret.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2963d308fc86d5bb9b33bffa5d3f1fdb369ae4a3554a9a4409fe9a129f478ac", + "format": 1 + }, + { + "name": "plugins/modules/docker_swarm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1556d311b7cdadd247b387fefcc8b4635d9ded6b4ed828625463b9279e2ea573", + "format": 1 + }, + { + "name": "plugins/modules/docker_container.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36be4d7fef1abb26be563301ccef7be905dab0db2b0e0df962a796e96ec1257a", + "format": 1 + }, + { + "name": "plugins/modules/docker_stack.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a4b46955f5f3069d881f20df9439c221fcf4554a97032325d3054f0b6c3bb70b", + "format": 1 + }, + { + "name": "plugins/modules/docker_stack_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bdca5344d56268c166c1b721772d336c096aed488a10143fb56dd3d3f42a7b7e", + "format": 1 + }, + { + "name": "plugins/modules/docker_node_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c86fd938ee1645139c6aec06b326bbb8b277195aee35ab91cb6040a8ecca221", + "format": 1 + }, + { + "name": "plugins/modules/docker_volume_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e067f07db942259d155d1e2f42001aab7ed46c8d2f872c7ca1ec934b1f7f730", + "format": 1 + }, + { + "name": "plugins/plugin_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/plugin_utils/socket_handler.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8709e42459561ae14a922188c7009d303a43a4b68997769bef8f8e8deaa979b9", + "format": 1 + }, + { + "name": "plugins/plugin_utils/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79147533ec30793c7a3080a00c7c006beb0ed323df2600332d3a2642921887fa", + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory/docker_containers.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1352f8fd57da2f219958f94a74953961d78c48a019410e0902a9026f8ceae564", + "format": 1 + }, + { + "name": "plugins/inventory/docker_swarm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f302c39f6d25a63b8d8ad618afa8bd3659b10cdb7c641f1b1218091c831d7367", + "format": 1 + }, + { + "name": "plugins/inventory/docker_machine.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7d3f8dfdffbd91dd70318f8f56bc60c5a370481c2d5cfdddd016b177f592a76", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"plugins/module_utils/swarm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "103cb45cba5fe8c37d224f556f14dcef349d075036ca486ce6bd3c79b52622c2", + "format": 1 + }, + { + "name": "plugins/module_utils/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48cc023fa7509d5e6fa6fa9963aee9bd962195fd68ceff6239a9fc6007f5515d", + "format": 1 + }, + { + "name": "plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/docker.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c68f2135245a172d92843788a9805a82c0bfefadb8772d240a4ff80922abc19", + "format": 1 + }, + { + "name": "plugins/connection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/connection/docker.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "427e1f8f5872eaec7cc58b27178656552b6b0448313a42fa0aa50579843c2bfa", + "format": 1 + }, + { + "name": "plugins/connection/docker_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f688b2f6647f812c54ee11e7b7599bb77eee2ac2c66a7030cc0bc870be2765e", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/docker/MANIFEST.json b/collections-debian-merged/ansible_collections/community/docker/MANIFEST.json
new file mode 100644
index 00000000..35d0e72e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/MANIFEST.json
@@ -0,0 +1,30 @@
+{
+ "collection_info": {
+  "namespace": "community",
+  "name": "docker",
+  "version": "1.2.2",
+  "authors": [
+   "Ansible Docker Working Group"
+  ],
+  "readme": "README.md",
+  "tags": [
+   "docker"
+  ],
+  "description": "Modules and plugins for working with Docker",
+  "license": [],
+  "license_file": "COPYING",
+  "dependencies": {},
+  "repository": "https://github.com/ansible-collections/community.docker",
+  "documentation": null,
+  "homepage": "https://github.com/ansible-collections/community.docker",
+  "issues": "https://github.com/ansible-collections/community.docker/issues"
+ },
+ "file_manifest_file": {
+  "name": "FILES.json",
+  "ftype": "file",
+  "chksum_type": "sha256",
+  "chksum_sha256": "60b263ef49ee324bee46d9520d70743b6cd35db6a28665bf39553000705830be",
+  "format": 1
+ },
+ "format": 1
+}
\ No newline at end of file
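The `file_manifest_file` block above is what ties the two manifests together: MANIFEST.json records the SHA-256 of the shipped FILES.json, and FILES.json in turn records a checksum for every other file in the collection. A minimal sketch of checking that link with stock Ansible modules; the collection path used here is an assumption for illustration:

```yaml
---
# Sketch only: verifies that FILES.json matches the checksum recorded
# in MANIFEST.json. The collection path below is an assumption; adjust
# it to wherever the collection is actually installed.
- hosts: localhost
  gather_facts: false
  vars:
    coll_dir: /path/to/ansible_collections/community/docker
    manifest: "{{ lookup('file', coll_dir + '/MANIFEST.json') | from_json }}"
  tasks:
    - name: Hash the shipped FILES.json
      stat:
        path: "{{ coll_dir }}/FILES.json"
        checksum_algorithm: sha256
      register: files_json

    - name: Compare with the value recorded in MANIFEST.json
      assert:
        that:
          - files_json.stat.checksum == manifest.file_manifest_file.chksum_sha256
```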
diff --git a/collections-debian-merged/ansible_collections/community/docker/README.md b/collections-debian-merged/ansible_collections/community/docker/README.md
new file mode 100644
index 00000000..e501b4fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/README.md
@@ -0,0 +1,93 @@
+# Docker Community Collection
+
+[![Build Status](https://dev.azure.com/ansible/community.docker/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.docker/_build?definitionId=25)
+[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.docker)](https://codecov.io/gh/ansible-collections/community.docker)
+
+This repo contains the `community.docker` Ansible Collection. The collection includes many modules and plugins to work with Docker.
+
+## Tested with Ansible
+
+Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
+
+## External requirements
+
+Most modules and plugins require the [Docker SDK for Python](https://pypi.org/project/docker/). For Python 2.6 support, use [the deprecated docker-py library](https://pypi.org/project/docker-py/) instead.
+
+The two libraries cannot be installed at the same time. If you accidentally installed both, uninstall *both* before re-installing one of them.
+
+## Included content
+
+* Connection plugins:
+  - community.docker.docker: use Docker containers as remotes
+* Inventory plugins:
+  - community.docker.docker_machine: collect Docker machines as inventory
+  - community.docker.docker_swarm: collect Docker Swarm nodes as inventory
+* Modules:
+  * Docker:
+    - community.docker.docker_container: manage Docker containers
+    - community.docker.docker_container_info: retrieve information on Docker containers
+    - community.docker.docker_host_info: retrieve information on the Docker daemon
+    - community.docker.docker_image: manage Docker images
+    - community.docker.docker_image_info: retrieve information on Docker images
+    - community.docker.docker_login: log in to and out of registries
+    - community.docker.docker_network: manage Docker networks
+    - community.docker.docker_network_info: retrieve information on Docker networks
+    - community.docker.docker_prune: prune Docker containers, images, networks, volumes, and build data
+    - community.docker.docker_volume: manage Docker volumes
+    - community.docker.docker_volume_info: retrieve information on Docker volumes
+  * Docker Compose:
+    - community.docker.docker_compose: manage Docker Compose files
+  * Docker Swarm:
+    - community.docker.docker_config: manage configurations
+    - community.docker.docker_node: manage Docker Swarm nodes
+    - community.docker.docker_node_info: retrieve information on Docker Swarm nodes
+    - community.docker.docker_secret: manage secrets
+    - community.docker.docker_swarm: manage Docker Swarm
+    - community.docker.docker_swarm_info: retrieve information on Docker Swarm
+    - community.docker.docker_swarm_service: manage Docker Swarm services
+    - community.docker.docker_swarm_service_info: retrieve information on Docker Swarm services
+  * Docker Stack:
+    - community.docker.docker_stack: manage Docker Stacks
+    - community.docker.docker_stack_info: retrieve information on Docker Stacks
+    - community.docker.docker_stack_task_info: retrieve information on tasks in Docker Stacks
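Everything in the list above is addressed by its fully qualified collection name (FQCN). As a quick, hypothetical illustration (the container name, image, and port mapping are made up, not taken from the collection's documentation), a play might combine a management module with its `_info` counterpart like this:

```yaml
---
# Illustrative sketch only; the module names are real, the container
# details ("web", nginx:alpine, port 8080) are assumptions.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Start a web server container
      community.docker.docker_container:
        name: web
        image: nginx:alpine
        state: started
        published_ports:
          - "8080:80"

    - name: Inspect the running container
      community.docker.docker_container_info:
        name: web
      register: web_info

    - name: Show whether the container exists
      debug:
        var: web_info.exists
```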
+## Using this collection
+
+Before using the Docker community collection, you need to install the collection with the `ansible-galaxy` CLI:
+
+    ansible-galaxy collection install community.docker
+
+You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
+
+```yaml
+collections:
+- name: community.docker
+```
+
+See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
+
+## Contributing to this collection
+
+If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths) directories and work on it there.
+
+You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
+
+## Release notes
+
+See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.rst).
+
+## More information
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
+- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
+- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)
+
+## Licensing
+
+GNU General Public License v3.0 or later.
+
+See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text.
diff --git a/collections-debian-merged/ansible_collections/community/docker/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/community/docker/changelogs/changelog.yaml
new file mode 100644
index 00000000..4a7c7148
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/changelogs/changelog.yaml
@@ -0,0 +1,193 @@
+ancestor: null
+releases:
+  0.1.0:
+    changes:
+      bugfixes:
+      - docker_login - fix internal config file storage to handle credentials for
+        more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
+      minor_changes:
+      - docker_container - now supports the ``device_requests`` option, which allows
+        requesting additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748,
+        https://github.com/ansible-collections/community.general/pull/1119).
+      release_summary: 'The ``community.docker`` collection continues the work on the
+        Ansible docker modules and plugins from their state in ``community.general``
+        1.2.0. The changes listed here are thus relative to the modules and plugins
+        ``community.general.docker*``.
+
+
+        All deprecation removals planned for ``community.general`` 2.0.0 have been
+        applied. All deprecation removals scheduled for ``community.general`` 3.0.0
+        have been re-scheduled for ``community.docker`` 2.0.0.
+
+        '
+      removed_features:
+      - docker_container - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_container - the default of ``networks_cli_compatible`` changed to ``true``
+        (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_container - the unused option ``trust_image_content`` has been removed
+        (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image - ``state=build`` has been removed. Use ``present`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image - the ``container_limits``, ``dockerfile``, ``http_timeout``,
+        ``nocache``, ``rm``, ``path``, ``buildargs``, ``pull`` options have been removed.
+        Use the corresponding suboptions of ``build`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image - the ``force`` option has been removed. Use the more specific
+        ``force_*`` options instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image - the ``source`` option is now mandatory (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image - the ``use_tls`` option has been removed. Use ``tls`` and ``validate_certs``
+        instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image - the default of the ``build.pull`` option changed to ``false``
+        (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_image_facts - this alias is no longer available, use ``docker_image_info``
+        instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_network - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_network - the ``ipam_options`` option has been removed. Use ``ipam_config``
+        instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_service - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm - ``state=inspect`` has been removed. Use ``docker_swarm_info``
+        instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm_service - the ``constraints`` option has been removed. Use ``placement.constraints``
+        instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm_service - the ``limit_cpu`` and ``limit_memory`` options have
+        been removed. Use the corresponding suboptions in ``limits`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm_service - the ``log_driver`` and ``log_driver_options`` options
+        have been removed. Use the corresponding suboptions in ``logging`` instead
+        (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm_service - the ``reserve_cpu`` and ``reserve_memory`` options
+        have been removed. Use the corresponding suboptions in ``reservations`` instead
+        (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm_service - the ``restart_policy``, ``restart_policy_attempts``,
+        ``restart_policy_delay`` and ``restart_policy_window`` options have been removed.
+        Use the corresponding suboptions in ``restart_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_swarm_service - the ``update_delay``, ``update_parallelism``, ``update_failure_action``,
+        ``update_monitor``, ``update_max_failure_ratio`` and ``update_order`` options
+        have been removed. Use the corresponding suboptions in ``update_config`` instead
+        (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_volume - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+      - docker_volume - the ``force`` option has been removed. Use ``recreate`` instead
+        (https://github.com/ansible-collections/community.docker/pull/1).
+    fragments:
+    - 0.1.0.yml
+    - c.g-1118-docker_login-config-store.yml
+    - c.g-1119-docker_container-device-reqests.yml
+    - c.g-2.0.0-deprecations.yml
+    release_date: '2020-10-30'
+  1.0.0:
+    changes:
+      minor_changes:
+      - Add collection-side support of the ``docker`` action group / module defaults
+        group (https://github.com/ansible-collections/community.docker/pull/17).
+      - docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
+      - docker_secret - add a warning when the secret does not have an ``ansible_key``
+        label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30,
+        https://github.com/ansible-collections/community.docker/pull/31).
+      release_summary: 'This is the first production (non-prerelease) release of ``community.docker``.
+
+        '
+    fragments:
+    - 1.0.0.yml
+    - 17-action-group.yml
+    - 31-docker-secret.yml
+    - community.general-805-docker_image-build-output.yml
+    release_date: '2020-11-17'
+  1.0.1:
+    changes:
+      bugfixes:
+      - docker_container - the validation for ``capabilities`` in ``device_requests``
+        was incorrect (https://github.com/ansible-collections/community.docker/issues/42,
+        https://github.com/ansible-collections/community.docker/pull/43).
+      release_summary: Maintenance release with a bugfix for ``docker_container``.
+    fragments:
+    - 1.0.1.yml
+    - 43-docker_container-device_requests.yml
+    release_date: '2020-12-11'
+  1.1.0:
+    changes:
+      bugfixes:
+      - docker_image - if ``push=true`` is used with ``repository``, and the image
+        does not need to be tagged, still push the image. This can happen if ``repository``
+        and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52,
+        https://github.com/ansible-collections/community.docker/pull/53).
+      - docker_image - report error when loading a broken archive that contains no
+        image (https://github.com/ansible-collections/community.docker/issues/46,
+        https://github.com/ansible-collections/community.docker/pull/55).
+      - docker_image - report error when the loaded archive does not contain the specified
+        image (https://github.com/ansible-collections/community.docker/issues/41,
+        https://github.com/ansible-collections/community.docker/pull/55).
+      deprecated_features:
+      - docker_container - currently ``published_ports`` can contain port mappings
+        next to the special value ``all``, in which case the port mappings are ignored.
+        This behavior is deprecated for community.docker 2.0.0, at which point it
+        will either be forbidden, or this behavior will be properly implemented similar
+        to how the Docker CLI tool handles this (https://github.com/ansible-collections/community.docker/issues/8,
+        https://github.com/ansible-collections/community.docker/pull/60).
+      minor_changes:
+      - docker_container - support specifying ``cgroup_parent`` (https://github.com/ansible-collections/community.docker/issues/6,
+        https://github.com/ansible-collections/community.docker/pull/59).
+      - docker_container - when a container is started with ``detached=false``, ``status``
+        is now also returned when it is 0 (https://github.com/ansible-collections/community.docker/issues/26,
+        https://github.com/ansible-collections/community.docker/pull/58).
+      - docker_image - support ``platform`` when building images (https://github.com/ansible-collections/community.docker/issues/22,
+        https://github.com/ansible-collections/community.docker/pull/54).
+      release_summary: Feature release with three new plugins and modules.
+    fragments:
+    - 1.1.0.yml
+    - 53-docker_image-tag-push.yml
+    - 54-docker_image-build-platform.yml
+    - 55-docker_image-loading.yml
+    - 58-docker_container-non-detached-status.yml
+    - 59-docker_container-cgroup-parent.yml
+    - 60-docker_container-publish-all.yml
+    modules:
+    - description: Return facts about whether the module runs in a Docker container
+      name: current_container_facts
+      namespace: ''
+    plugins:
+      connection:
+      - description: Run tasks in docker containers
+        name: docker_api
+        namespace: null
+      inventory:
+      - description: Ansible dynamic inventory plugin for Docker containers.
+        name: docker_containers
+        namespace: null
+    release_date: '2021-01-03'
+  1.2.0:
+    changes:
+      bugfixes:
+      - docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
+      - docker_image - fix crash on loading images with versions of Docker SDK for
+        Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72,
+        https://github.com/ansible-collections/community.docker/pull/73).
+      minor_changes:
+      - docker_container - added the ``default_host_ip`` option, which allows explicitly
+        setting the default IP string for published ports without explicitly specified
+        IPs. When using IPv6 binds with Docker 20.10.2 or newer, this needs to be
+        set to an empty string (``""``) (https://github.com/ansible-collections/community.docker/issues/70,
+        https://github.com/ansible-collections/community.docker/pull/71).
+      release_summary: Feature release with one new feature and two bugfixes.
+    fragments:
+    - 1.2.0.yml
+    - 66-ipv6-zones.yml
+    - 71-docker_container-default_host_ip.yml
+    - 73-docker_image-fix-old-docker-py-version.yml
+    release_date: '2021-01-25'
+  1.2.1:
+    changes:
+      bugfixes:
+      - docker connection plugin - fix Docker version parsing, as some docker versions
+        have a leading ``v`` in the output of the command ``docker version --format
+        "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
+      release_summary: Bugfix release.
+    fragments:
+    - 1.2.1.yml
+    - 76-leading-v-support-in-docker-version.yml
+    release_date: '2021-01-28'
+  1.2.2:
+    changes:
+      release_summary: Security bugfix release to address CVE-2021-20191.
+      security_fixes:
+      - docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent
+        accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.docker/pull/80).
+    fragments:
+    - 1.2.2.yml
+    - CVE-2021-20191_no_log.yml
+    release_date: '2021-02-05'
diff --git a/collections-debian-merged/ansible_collections/community/docker/changelogs/config.yaml b/collections-debian-merged/ansible_collections/community/docker/changelogs/config.yaml
new file mode 100644
index 00000000..120c7a5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/changelogs/config.yaml
@@ -0,0 +1,29 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+  - Major Changes
+- - minor_changes
+  - Minor Changes
+- - breaking_changes
+  - Breaking Changes / Porting Guide
+- - deprecated_features
+  - Deprecated Features
+- - removed_features
+  - Removed Features (previously deprecated)
+- - security_fixes
+  - Security Fixes
+- - bugfixes
+  - Bugfixes
+- - known_issues
+  - Known Issues
+title: Docker Community Collection
+trivial_section_name: trivial
diff --git a/collections-debian-merged/ansible_collections/community/docker/changelogs/fragments/.keep b/collections-debian-merged/ansible_collections/community/docker/changelogs/fragments/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/changelogs/fragments/.keep
diff --git a/collections-debian-merged/ansible_collections/community/docker/meta/runtime.yml b/collections-debian-merged/ansible_collections/community/docker/meta/runtime.yml
new file mode 100644
index 00000000..4c30dbc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/meta/runtime.yml
@@ -0,0 +1,24 @@
+---
+requires_ansible: '>=2.9.10'
+action_groups:
+  docker:
+  - docker_compose
+  - docker_config
+  - docker_container
+  - docker_container_info
+  - docker_host_info
+  - docker_image
+  - docker_image_info
+  - docker_login
+  - docker_network
+  - docker_network_info
+  - docker_node
+  - docker_node_info
+  - docker_prune
+  - docker_secret
+  - docker_swarm
+  - docker_swarm_info
+  - docker_swarm_service
+  - docker_swarm_service_info
+  - docker_volume
+  - docker_volume_info
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py
new file mode 100644
index 00000000..71e00761
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py
@@ -0,0 +1,370 @@
+# Based on the chroot connection plugin by Maykel Moya
+#
+# (c) 2014, Lorin Hochstein
+# (c) 2015, Leendert Brouwer (https://github.com/objectified)
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    author:
+        - Lorin Hochstein (!UNKNOWN)
+        - Leendert Brouwer (!UNKNOWN)
+    name: docker
+    short_description: Run tasks in docker containers
+    description:
+        - Run commands or put/fetch files to an existing docker container.
+        - Uses the Docker CLI to execute commands in the container. 
If you prefer + to directly connect to the Docker daemon, use the + R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection) + connection plugin. + options: + remote_user: + description: + - The user to execute as inside the container + vars: + - name: ansible_user + - name: ansible_docker_user + docker_extra_args: + description: + - Extra arguments to pass to the docker command line + default: '' + remote_addr: + description: + - The name of the container you want to access. + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_docker_host +''' + +import distutils.spawn +import fcntl +import os +import os.path +import subprocess +import re + +from distutils.version import LooseVersion + +import ansible.constants as C +from ansible.compat import selectors +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local docker based connections ''' + + transport = 'community.docker.docker' + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + # Note: docker supports running as non-root in some configurations. + # (For instance, setting the UNIX socket file to be readable and + # writable by a specific UNIX group and then putting users into that + # group). Therefore we don't check that the user is root when using + # this connection. But if the user is getting a permission denied + # error it probably means that docker on their system is only + # configured to be connected to by root and they are not running as + # root. + + # Windows uses Powershell modules + if getattr(self._shell, "_IS_WINDOWS", False): + self.module_implementation_preferences = ('.ps1', '.exe', '') + + if 'docker_command' in kwargs: + self.docker_cmd = kwargs['docker_command'] + else: + self.docker_cmd = distutils.spawn.find_executable('docker') + if not self.docker_cmd: + raise AnsibleError("docker command not found in PATH") + + docker_version = self._get_docker_version() + if docker_version == u'dev': + display.warning(u'Docker version number is "dev". 
Will assume latest version.') + if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'): + raise AnsibleError('docker connection type requires docker 1.3 or higher') + + # The remote user we will request from docker (if supported) + self.remote_user = None + # The actual user which will execute commands in docker (if known) + self.actual_user = None + + if self._play_context.remote_user is not None: + if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'): + # Support for specifying the exec user was added in docker 1.7 + self.remote_user = self._play_context.remote_user + self.actual_user = self.remote_user + else: + self.actual_user = self._get_docker_remote_user() + + if self.actual_user != self._play_context.remote_user: + display.warning(u'docker {0} does not support remote_user, using container default: {1}' + .format(docker_version, self.actual_user or u'?')) + elif self._display.verbosity > 2: + # Since we're not setting the actual_user, look it up so we have it for logging later + # Only do this if display verbosity is high enough that we'll need the value + # This saves overhead from calling into docker when we don't need to + self.actual_user = self._get_docker_remote_user() + + @staticmethod + def _sanitize_version(version): + version = re.sub(u'[^0-9a-zA-Z.]', u'', version) + version = re.sub(u'^v', u'', version) + return version + + def _old_docker_version(self): + cmd_args = [] + if self._play_context.docker_extra_args: + cmd_args += self._play_context.docker_extra_args.split(' ') + + old_version_subcommand = ['version'] + + old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand + p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + cmd_output, err = p.communicate() + + return old_docker_cmd, to_native(cmd_output), err, p.returncode + + def _new_docker_version(self): + # no result yet, must be newer Docker version + cmd_args = [] + if self._play_context.docker_extra_args: + cmd_args += self._play_context.docker_extra_args.split(' ') + + new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"] + + new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand + p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + cmd_output, err = p.communicate() + return new_docker_cmd, to_native(cmd_output), err, p.returncode + + def _get_docker_version(self): + + cmd, cmd_output, err, returncode = self._old_docker_version() + if returncode == 0: + for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'): + if line.startswith(u'Server version:'): # old docker versions + return self._sanitize_version(line.split()[2]) + + cmd, cmd_output, err, returncode = self._new_docker_version() + if returncode: + raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err))) + + return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict')) + + def _get_docker_remote_user(self): + """ Get the default user configured in the docker container """ + p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + out, err = p.communicate() + out = to_text(out, errors='surrogate_or_strict') + + if p.returncode != 0: + display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err))) + return None + + # The default exec user is root, unless it 
was changed in the Dockerfile with USER + return out.strip() or u'root' + + def _build_exec_cmd(self, cmd): + """ Build the local docker exec command to run cmd on remote_host + + If remote_user is available and is supported by the docker + version we are using, it will be provided to docker exec. + """ + + local_cmd = [self.docker_cmd] + + if self._play_context.docker_extra_args: + local_cmd += self._play_context.docker_extra_args.split(' ') + + local_cmd += [b'exec'] + + if self.remote_user is not None: + local_cmd += [b'-u', self.remote_user] + + # -i is needed to keep stdin open which allows pipelining to work + local_cmd += [b'-i', self._play_context.remote_addr] + cmd + + return local_cmd + + def _connect(self, port=None): + """ Connect to the container. Nothing to do """ + super(Connection, self)._connect() + if not self._connected: + display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( + self.actual_user or u'?'), host=self._play_context.remote_addr + ) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=False): + """ Run a command on the docker host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) + + display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr) + display.debug("opening command with Popen()") + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + p = subprocess.Popen( + local_cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + display.debug("done running command with Popen()") + + if self.become and self.become.expect_prompt() and sudoable: + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + selector = selectors.DefaultSelector() + selector.register(p.stdout, selectors.EVENT_READ) + selector.register(p.stderr, selectors.EVENT_READ) + + become_output = b'' + try: + while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output): + events = selector.select(self._play_context.timeout) + if not events: + stdout, stderr = p.communicate() + raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output)) + + for key, event in events: + if key.fileobj == p.stdout: + chunk = p.stdout.read() + elif key.fileobj == p.stderr: + chunk = p.stderr.read() + + if not chunk: + stdout, stderr = p.communicate() + raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output)) + become_output += chunk + finally: + selector.close() + + if not self.become.check_success(become_output): + become_pass = self.become.get_option('become_pass', playcontext=self._play_context) + p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) + + display.debug("getting output with communicate()") + stdout, stderr = p.communicate(in_data) + display.debug("done communicating") + + display.debug("done with docker.exec_command()") + return (p.returncode, stdout, stderr) + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, 
then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. + + Can revisit using $HOME instead if it's a problem + ''' + if getattr(self._shell, "_IS_WINDOWS", False): + import ntpath + return ntpath.normpath(remote_path) + else: + if not remote_path.startswith(os.path.sep): + remote_path = os.path.join(os.path.sep, remote_path) + return os.path.normpath(remote_path) + + def put_file(self, in_path, out_path): + """ Transfer a file from local to docker container """ + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + out_path = self._prefix_login_path(out_path) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % to_native(in_path)) + + out_path = shlex_quote(out_path) + # Older docker doesn't have native support for copying files into + # running containers, so we use docker exec to implement this + # Although docker version 1.8 and later provide support, the + # owner and group of the files are always set to root + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + if not os.fstat(in_file.fileno()).st_size: + count = ' count=0' + else: + count = '' + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + try: + p = subprocess.Popen(args, stdin=in_file, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("docker connection requires dd command in the container to put files") + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % + (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr))) + + def fetch_file(self, in_path, out_path): + """ Fetch a file from container to local. 
""" + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + in_path = self._prefix_login_path(in_path) + # out_path is the final file path, but docker takes a directory, not a + # file path + out_dir = os.path.dirname(out_path) + + args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir] + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + + p = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p.communicate() + + if getattr(self._shell, "_IS_WINDOWS", False): + import ntpath + actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path)) + else: + actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) + + if p.returncode != 0: + # Older docker doesn't have native support for fetching files command `cp` + # If `cp` fails, try to use `dd` instead + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file: + try: + p = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=out_file, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("docker connection requires dd command in the container to put files") + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + # Rename if needed + if actual_out_path != out_path: + os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict')) + + def close(self): + """ Terminate the connection. Nothing to do for Docker""" + super(Connection, self).close() + self._connected = False diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py new file mode 100644 index 00000000..d1cccf81 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py @@ -0,0 +1,386 @@ +# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +author: + - Felix Fontein (@felixfontein) +name: docker_api +short_description: Run tasks in docker containers +version_added: 1.1.0 +description: + - Run commands or put/fetch files to an existing docker container. + - Uses Docker SDK for Python to interact directly with the Docker daemon instead of + using the Docker CLI. Use the + R(community.docker.docker,ansible_collections.community.docker.docker_connection) + connection plugin if you want to use the Docker CLI. +options: + remote_user: + type: str + description: + - The user to execute as inside the container. + vars: + - name: ansible_user + - name: ansible_docker_user + remote_addr: + type: str + description: + - The name of the container you want to access. + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_docker_host + + # The following are options from the docs fragment. We want to allow the user to + # specify them with Ansible variables. 
+ docker_host: + vars: + - name: ansible_docker_docker_host + tls_hostname: + vars: + - name: ansible_docker_tls_hostname + api_version: + vars: + - name: ansible_docker_api_version + timeout: + vars: + - name: ansible_docker_timeout + ca_cert: + vars: + - name: ansible_docker_ca_cert + client_cert: + vars: + - name: ansible_docker_client_cert + client_key: + vars: + - name: ansible_docker_client_key + ssl_version: + vars: + - name: ansible_docker_ssl_version + tls: + vars: + - name: ansible_docker_tls + validate_certs: + vars: + - name: ansible_docker_validate_certs + +extends_documentation_fragment: + - community.docker.docker + - community.docker.docker.docker_py_1_documentation +''' + +import io +import os +import os.path +import shutil +import tarfile + +from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display + +from ansible_collections.community.docker.plugins.module_utils.common import ( + RequestException, +) +from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import ( + DockerSocketHandler, +) +from ansible_collections.community.docker.plugins.plugin_utils.common import ( + AnsibleDockerClient, +) + +try: + from docker.errors import DockerException, APIError, NotFound +except Exception: + # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common + pass + +MIN_DOCKER_PY = '1.7.0' +MIN_DOCKER_API = None + + +display = Display() + + +class Connection(ConnectionBase): + ''' Local docker based connections ''' + + transport = 'community.docker.docker_api' + has_pipelining = True + + def _call_client(self, play_context, callable, not_found_can_be_resource=False): + try: + return callable() + except NotFound as e: + if not_found_can_be_resource: + raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, play_context.remote_addr)) + else: + raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, play_context.remote_addr)) + except APIError as e: + if e.response and e.response.status_code == 409: + raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, play_context.remote_addr)) + self.client.fail( + 'An unexpected docker error occurred for container "{1}": {0}'.format(e, play_context.remote_addr) + ) + except DockerException as e: + self.client.fail( + 'An unexpected docker error occurred for container "{1}": {0}'.format(e, play_context.remote_addr) + ) + except RequestException as e: + self.client.fail( + 'An unexpected requests error occurred for container "{1}" when docker-py tried to talk to the docker daemon: {0}' + .format(e, play_context.remote_addr) + ) + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self.client = None + self.ids = dict() + + # Windows uses Powershell modules + if getattr(self._shell, "_IS_WINDOWS", False): + self.module_implementation_preferences = ('.ps1', '.exe', '') + + self.actual_user = play_context.remote_user + + def _connect(self, port=None): + """ Connect to the container. 
Nothing to do """ + super(Connection, self)._connect() + if not self._connected: + display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( + self.actual_user or u'?'), host=self._play_context.remote_addr + ) + if self.client is None: + self.client = AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API) + self._connected = True + + if self.actual_user is None and display.verbosity > 2: + # Since we're not setting the actual_user, look it up so we have it for logging later + # Only do this if display verbosity is high enough that we'll need the value + # This saves overhead from calling into docker when we don't need to + display.vvv(u"Trying to determine actual user") + result = self._call_client(self._play_context, lambda: self.client.inspect_container(self._play_context.remote_addr)) + if result.get('Config'): + self.actual_user = result['Config'].get('User') + if self.actual_user is not None: + display.vvv(u"Actual user is '{0}'".format(self.actual_user)) + + def exec_command(self, cmd, in_data=None, sudoable=False): + """ Run a command on the docker host """ + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + command = [self._play_context.executable, '-c', to_text(cmd)] + + do_become = self.become and self.become.expect_prompt() and sudoable + + display.vvv( + u"EXEC {0}{1}{2}".format( + to_text(command), + ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '', + ', with become prompt' if do_become else '', + ), + host=self._play_context.remote_addr + ) + + need_stdin = True if (in_data is not None) or do_become else False + + exec_data = self._call_client(self._play_context, lambda: self.client.exec_create( + self._play_context.remote_addr, + command, + stdout=True, + stderr=True, + stdin=need_stdin, + user=self._play_context.remote_user or '', + workdir=None, + )) + exec_id = exec_data['Id'] + + if need_stdin: + exec_socket = self._call_client(self._play_context, lambda: self.client.exec_start( + exec_id, + detach=False, + socket=True, + )) + try: + with DockerSocketHandler(display, exec_socket, container=self._play_context.remote_addr) as exec_socket_handler: + if do_become: + become_output = [b''] + + def append_become_output(stream_id, data): + become_output[0] += data + + exec_socket_handler.set_block_done_callback(append_become_output) + + while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]): + if not exec_socket_handler.select(self._play_context.timeout): + stdout, stderr = exec_socket_handler.consume() + raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0])) + + if exec_socket_handler.is_eof(): + raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0])) + + if not self.become.check_success(become_output[0]): + become_pass = self.become.get_option('become_pass', playcontext=self._play_context) + exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + + if in_data is not None: + exec_socket_handler.write(in_data) + + stdout, stderr = exec_socket_handler.consume() + finally: + exec_socket.close() + else: + stdout, stderr = self._call_client(self._play_context, lambda: self.client.exec_start( + exec_id, + detach=False, + stream=False, + socket=False, + demux=True, + )) + + result = self._call_client(self._play_context, lambda: 
self.client.exec_inspect(exec_id)) + + return result.get('ExitCode') or 0, stdout or b'', stderr or b'' + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. + + Can revisit using $HOME instead if it's a problem + ''' + if getattr(self._shell, "_IS_WINDOWS", False): + import ntpath + return ntpath.normpath(remote_path) + else: + if not remote_path.startswith(os.path.sep): + remote_path = os.path.join(os.path.sep, remote_path) + return os.path.normpath(remote_path) + + def put_file(self, in_path, out_path): + """ Transfer a file from local to docker container """ + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + out_path = self._prefix_login_path(out_path) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % to_native(in_path)) + + if self.actual_user not in self.ids: + dummy, ids, dummy = self.exec_command(b'id -u && id -g') + try: + user_id, group_id = ids.splitlines() + self.ids[self.actual_user] = int(user_id), int(group_id) + display.vvvv( + 'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user), + host=self._play_context.remote_addr + ) + except Exception as e: + raise AnsibleConnectionFailure( + 'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}' + .format(e, self._play_context.remote_addr, ids) + ) + + b_in_path = to_bytes(in_path, errors='surrogate_or_strict') + + out_dir, out_file = os.path.split(out_path) + + # TODO: stream tar file, instead of creating it in-memory into a BytesIO + + bio = io.BytesIO() + with tarfile.open(fileobj=bio, mode='w|', dereference=True, encoding='utf-8') as tar: + # Note that without both name (bytes) and arcname (unicode), this either fails for + # Python 2.6/2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this + # form) it works with Python 2.6, 2.7, 3.5, 3.6, and 3.7 up to 3.9. + tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file)) + user_id, group_id = self.ids[self.actual_user] + tarinfo.uid = user_id + tarinfo.uname = '' + if self.actual_user: + tarinfo.uname = self.actual_user + tarinfo.gid = group_id + tarinfo.gname = '' + tarinfo.mode &= 0o700 + with open(b_in_path, 'rb') as f: + tar.addfile(tarinfo, fileobj=f) + data = bio.getvalue() + + ok = self._call_client(self._play_context, lambda: self.client.put_archive( + self._play_context.remote_addr, + out_dir, + data, # can also be file object for streaming; this is only clear from the + # implementation of put_archive(), which uses requests's put(). + # See https://2.python-requests.org/en/master/user/advanced/#streaming-uploads + # WARNING: might not work with all transports! + ), not_found_can_be_resource=True) + if not ok: + raise AnsibleConnectionFailure( + 'Unknown error while creating file "{0}" in container "{1}".' + .format(out_path, self._play_context.remote_addr) + ) + + def fetch_file(self, in_path, out_path): + """ Fetch a file from container to local. 
""" + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + in_path = self._prefix_login_path(in_path) + b_out_path = to_bytes(out_path, errors='surrogate_or_strict') + + considered_in_paths = set() + + while True: + if in_path in considered_in_paths: + raise AnsibleConnectionFailure('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path)) + considered_in_paths.add(in_path) + + display.vvvv('FETCH: Fetching "%s"' % in_path, host=self._play_context.remote_addr) + stream, stats = self._call_client(self._play_context, lambda: self.client.get_archive( + self._play_context.remote_addr, + in_path, + ), not_found_can_be_resource=True) + + # TODO: stream tar file instead of downloading it into a BytesIO + + bio = io.BytesIO() + for chunk in stream: + bio.write(chunk) + bio.seek(0) + + with tarfile.open(fileobj=bio, mode='r|') as tar: + symlink_member = None + first = True + for member in tar: + if not first: + raise AnsibleConnectionFailure('Received tarfile contains more than one file!') + first = False + if member.issym(): + symlink_member = member + continue + if not member.isfile(): + raise AnsibleConnectionFailure('Remote file "%s" is not a regular file or a symbolic link' % in_path) + in_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`... + with open(b_out_path, 'wb') as out_f: + shutil.copyfileobj(in_f, out_f, member.size) + if first: + raise AnsibleConnectionFailure('Received tarfile is empty!') + # If the only member was a file, it's already extracted. If it is a symlink, process it now. + if symlink_member is not None: + in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname) + display.vvvv('FETCH: Following symbolic link to "%s"' % in_path, host=self._play_context.remote_addr) + continue + return + + def close(self): + """ Terminate the connection. Nothing to do for Docker""" + super(Connection, self).close() + self._connected = False diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py b/collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py new file mode 100644 index 00000000..f0d06e64 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Docker doc fragment + DOCUMENTATION = r''' + +options: + docker_host: + description: + - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the + TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, + the module will automatically replace C(tcp) in the connection URL with C(https). + - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used + instead. If the environment variable is not set, the default value will be used. + type: str + default: unix://var/run/docker.sock + aliases: [ docker_url ] + tls_hostname: + description: + - When verifying the authenticity of the Docker Host server, provide the expected name of the server. 
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will + be used instead. If the environment variable is not set, the default value will be used. + type: str + default: localhost + api_version: + description: + - The version of the Docker API running on the Docker Host. + - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon. + - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be + used instead. If the environment variable is not set, the default value will be used. + type: str + default: auto + aliases: [ docker_api_version ] + timeout: + description: + - The maximum amount of time in seconds to wait on a response from the API. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used + instead. If the environment variable is not set, the default value will be used. + type: int + default: 60 + ca_cert: + description: + - Use a CA certificate when performing server verification by providing the path to a CA certificate file. + - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, + the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_ca_cert, cacert_path ] + client_cert: + description: + - Path to the client's TLS certificate file. + - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, + the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_client_cert, cert_path ] + client_key: + description: + - Path to the client's TLS key file. + - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, + the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_client_key, key_path ] + ssl_version: + description: + - Provide a valid SSL version number. Default value determined by ssl.py module. + - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be + used instead. + type: str + tls: + description: + - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host + server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used + instead. If the environment variable is not set, the default value will be used. + type: bool + default: no + validate_certs: + description: + - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be + used instead. If the environment variable is not set, the default value will be used. + type: bool + default: no + aliases: [ tls_verify ] + debug: + description: + - Debug mode + type: bool + default: no + +notes: + - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. + You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION), + C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). 
If you are using docker machine, run the script shipped
+      with the product that sets up the environment. It will set these variables for you. See
+      U(https://docs.docker.com/machine/reference/env/) for more details.
+    - When connecting to the Docker daemon with TLS, you might need to install additional Python packages.
+      For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+    - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+      In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+      and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
+
+    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+    DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+notes:
+    - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+      communicate with the Docker daemon.
+requirements:
+    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+      For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
+      install the C(docker) Python module. Note that both modules should *not*
+      be installed at the same time. Also note that when both modules are installed
+      and one of them is uninstalled, the other might no longer function and a
+      reinstall of it is required."
+'''
+
+    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
+
+    DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+notes:
+    - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+      communicate with the Docker daemon.
+requirements:
+    - "Python >= 2.7"
+    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+      This module does *not* work with docker-py."
+'''
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py
new file mode 100644
index 00000000..ef2697a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
+# For the parts taken from the docker inventory script:
+# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
+# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
+# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+name: docker_containers
+short_description: Ansible dynamic inventory plugin for Docker containers. 
+version_added: 1.1.0
+author:
+    - Felix Fontein (@felixfontein)
+requirements:
+    - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+extends_documentation_fragment:
+    - ansible.builtin.constructed
+    - community.docker.docker
+    - community.docker.docker.docker_py_1_documentation
+description:
+    - Reads inventories from the Docker API.
+    - Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
+options:
+    plugin:
+        description:
+            - The name of this plugin, it should always be set to C(community.docker.docker_containers)
+              for this plugin to recognize it as its own.
+        type: str
+        required: true
+        choices: [ community.docker.docker_containers ]
+
+    connection_type:
+        description:
+            - Which connection type to use for the containers.
+            - Default is to use SSH (C(ssh)). For this, the options I(default_ip) and
+              I(private_ssh_port) are used.
+            - Alternatively, C(docker-cli) selects the
+              R(docker connection plugin,ansible_collections.community.docker.docker_connection),
+              and C(docker-api) selects the
+              R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
+        type: str
+        default: docker-api
+        choices:
+            - ssh
+            - docker-cli
+            - docker-api
+
+    verbose_output:
+        description:
+            - Toggle to (not) include all available inspection metadata.
+            - Note that all top-level keys will be transformed to the format C(docker_xxx).
+              For example, C(HostConfig) is converted to C(docker_hostconfig).
+            - If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups).
+            - The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
+        type: bool
+        default: false
+
+    default_ip:
+        description:
+            - The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+              '0.0.0.0'.
+            - Only used if I(connection_type) is C(ssh).
+        type: str
+        default: 127.0.0.1
+
+    private_ssh_port:
+        description:
+            - The port containers use for SSH.
+            - Only used if I(connection_type) is C(ssh).
+        type: int
+        default: 22
+
+    add_legacy_groups:
+        description:
+            - "Add the same groups as the C(docker) inventory script does. These are the following:"
+            - "C(<container id>): contains the container of this ID."
+            - "C(<container name>): contains the container that has this name."
+            - "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
+            - "C(image_<image name>): contains the containers that have the image C(<image name>)."
+            - "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
+            - "C(service_<service name>): contains the containers that belong to the service C(<service name>)."
+            - "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
+              Useful if you run this plugin against multiple Docker daemons."
+            - "C(running): contains all containers that are running."
+            - "C(stopped): contains all containers that are not running."
+            - If this is not set to C(true), you should use keyed groups to add the containers to groups.
+              See the examples for how to do that. 
+ type: bool + default: false +''' + +EXAMPLES = ''' +# Minimal example using local Docker daemon +plugin: community.docker.docker_containers +docker_host: unix://var/run/docker.sock + +# Minimal example using remote Docker daemon +plugin: community.docker.docker_containers +docker_host: tcp://my-docker-host:2375 + +# Example using remote Docker daemon with unverified TLS +plugin: community.docker.docker_containers +docker_host: tcp://my-docker-host:2376 +tls: true + +# Example using remote Docker daemon with verified TLS and client certificate verification +plugin: community.docker.docker_containers +docker_host: tcp://my-docker-host:2376 +validate_certs: true +ca_cert: /somewhere/ca.pem +client_key: /somewhere/key.pem +client_cert: /somewhere/cert.pem + +# Example using constructed features to create groups +plugin: community.docker.docker_containers +docker_host: tcp://my-docker-host:2375 +strict: false +keyed_groups: + # Add containers with primary network foo to a network_foo group + - prefix: network + key: 'docker_hostconfig.NetworkMode' + # Add Linux hosts to an os_linux group + - prefix: os + key: docker_platform +''' + +import re + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible_collections.community.docker.plugins.module_utils.common import update_tls_hostname, get_connect_params +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.parsing.utils.addresses import parse_address + +from ansible_collections.community.docker.plugins.module_utils.common import ( + RequestException, +) +from ansible_collections.community.docker.plugins.plugin_utils.common import ( + AnsibleDockerClient, +) + +try: + from docker.errors import DockerException, APIError, NotFound +except Exception: + # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common + pass + +MIN_DOCKER_PY = '1.7.0' +MIN_DOCKER_API = None + + +class InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using Docker daemon as source. 
''' + + NAME = 'community.docker.docker_containers' + + def _slugify(self, value): + return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) + + def _populate(self, client): + strict = self.get_option('strict') + + ssh_port = self.get_option('private_ssh_port') + default_ip = self.get_option('default_ip') + hostname = self.get_option('docker_host') + verbose_output = self.get_option('verbose_output') + connection_type = self.get_option('connection_type') + add_legacy_groups = self.get_option('add_legacy_groups') + + try: + containers = client.containers(all=True) + except APIError as exc: + raise AnsibleError("Error listing containers: %s" % to_native(exc)) + + if add_legacy_groups: + self.inventory.add_group('running') + self.inventory.add_group('stopped') + + for container in containers: + id = container.get('Id') + short_id = id[:13] + + try: + name = container.get('Names', list())[0].lstrip('/') + full_name = name + except IndexError: + name = short_id + full_name = id + + self.inventory.add_host(name) + facts = dict( + docker_name=name, + docker_short_id=short_id + ) + full_facts = dict() + + try: + inspect = client.inspect_container(id) + except APIError as exc: + raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc))) + + state = inspect.get('State') or dict() + config = inspect.get('Config') or dict() + labels = config.get('Labels') or dict() + + running = state.get('Running') + + # Add container to groups + image_name = config.get('Image') + if image_name and add_legacy_groups: + self.inventory.add_group('image_{0}'.format(image_name)) + self.inventory.add_host(name, group='image_{0}'.format(image_name)) + + stack_name = labels.get('com.docker.stack.namespace') + if stack_name: + full_facts['docker_stack'] = stack_name + if add_legacy_groups: + self.inventory.add_group('stack_{0}'.format(stack_name)) + self.inventory.add_host(name, group='stack_{0}'.format(stack_name)) + + service_name = labels.get('com.docker.swarm.service.name') + if service_name: + full_facts['docker_service'] = service_name + if add_legacy_groups: + self.inventory.add_group('service_{0}'.format(service_name)) + self.inventory.add_host(name, group='service_{0}'.format(service_name)) + + if connection_type == 'ssh': + # Figure out ssh IP and Port + try: + # Lookup the public facing port Nat'ed to ssh port. 
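+                    # As a sketch of what the lookup below relies on (values are
+                    # illustrative): the Docker SDK's port() returns the host-side
+                    # bindings for the given private port, e.g.
+                    #   [{'HostIp': '0.0.0.0', 'HostPort': '49154'}]
+                    # hence the [0] index and the HostIp/HostPort keys used below.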
+                    port = client.port(container, ssh_port)[0]
+                except (IndexError, AttributeError, TypeError):
+                    port = dict()
+
+                try:
+                    ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
+                except KeyError:
+                    ip = ''
+
+                facts.update(dict(
+                    ansible_ssh_host=ip,
+                    ansible_ssh_port=port.get('HostPort', 0),
+                ))
+            elif connection_type == 'docker-cli':
+                facts.update(dict(
+                    ansible_host=full_name,
+                    ansible_connection='community.docker.docker',
+                ))
+            elif connection_type == 'docker-api':
+                facts.update(dict(
+                    ansible_host=full_name,
+                    ansible_connection='community.docker.docker_api',
+                ))
+
+            full_facts.update(facts)
+            for key, value in inspect.items():
+                fact_key = self._slugify(key)
+                full_facts[fact_key] = value
+
+            if verbose_output:
+                facts.update(full_facts)
+
+            for key, value in facts.items():
+                self.inventory.set_variable(name, key, value)
+
+            # Use constructed if applicable
+            # Composed variables
+            self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
+            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+            self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
+            # Create groups based on variable values and add the corresponding hosts to it
+            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)
+
+            # We need to do this last since we also add a group called `name`.
+            # When we do this before a set_variable() call, the variables are assigned
+            # to the group, and not to the host.
+            if add_legacy_groups:
+                self.inventory.add_group(id)
+                self.inventory.add_host(name, group=id)
+                self.inventory.add_group(name)
+                self.inventory.add_host(name, group=name)
+                self.inventory.add_group(short_id)
+                self.inventory.add_host(name, group=short_id)
+                self.inventory.add_group(hostname)
+                self.inventory.add_host(name, group=hostname)
+
+                if running is True:
+                    self.inventory.add_host(name, group='running')
+                else:
+                    self.inventory.add_host(name, group='stopped')
+
+    def verify_file(self, path):
+        """Return the possibility of a file being consumable by this plugin."""
+        return (
+            super(InventoryModule, self).verify_file(path) and
+            path.endswith(('docker.yaml', 'docker.yml')))
+
+    def _create_client(self):
+        return AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)
+
+    def parse(self, inventory, loader, path, cache=True):
+        super(InventoryModule, self).parse(inventory, loader, path, cache)
+        self._read_config_data(path)
+        client = self._create_client()
+        try:
+            self._populate(client)
+        except DockerException as e:
+            raise AnsibleError(
+                'An unexpected docker error occurred: {0}'.format(e)
+            )
+        except RequestException as e:
+            raise AnsibleError(
+                'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e)
+            )
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py
new file mode 100644
index 00000000..7133ba96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function) 
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    name: docker_machine
+    author: Ximon Eighteen (@ximon18)
+    short_description: Docker Machine inventory source
+    requirements:
+        - L(Docker Machine,https://docs.docker.com/machine/)
+    extends_documentation_fragment:
+        - constructed
+    description:
+        - Get inventory hosts from Docker Machine.
+        - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
+        - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
+        - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
+
+    options:
+        plugin:
+            description: token that ensures this is a source file for the C(docker_machine) plugin.
+            required: yes
+            choices: ['docker_machine', 'community.docker.docker_machine']
+        daemon_env:
+            description:
+                - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+                - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
+                  A warning will be issued for any skipped host if the choice is C(require).
+                - With C(optional) and C(optional-silently), fetch them and do not skip hosts for which they cannot be fetched.
+                  A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
+                - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
+                - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
+            type: str
+            choices:
+                - require
+                - require-silently
+                - optional
+                - optional-silently
+                - skip
+            default: require
+        running_required:
+            description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+            type: bool
+            default: yes
+        verbose_output:
+            description: when true, include all available node metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
+            type: bool
+            default: yes
+'''
+
+EXAMPLES = '''
+# Minimal example
+plugin: community.docker.docker_machine
+
+# Example using constructed features to create a group per Docker Machine driver
+# (https://docs.docker.com/machine/drivers/), e.g.:
+#   $ docker-machine create --driver digitalocean ... mymachine
+#   $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
+#   {
+#     ...
+#     "digitalocean": {
+#       "hosts": [
+#           "mymachine"
+#       ]
+#     ...
+#   }
strict: no
+keyed_groups:
+    - separator: ''
+      key: docker_machine_node_attributes.DriverName
+
+# Example grouping hosts by Docker Machine tag
+strict: no
+keyed_groups:
+    - prefix: tag
+      key: 'dm_tags'
+
+# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
+compose:
+    ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+import json
+import re
+import subprocess
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+    ''' Host inventory parser for ansible using Docker machine as source. 
'''
+
+    NAME = 'community.docker.docker_machine'
+
+    DOCKER_MACHINE_PATH = None
+
+    def _run_command(self, args):
+        if not self.DOCKER_MACHINE_PATH:
+            try:
+                self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+            except ValueError as e:
+                raise AnsibleError(to_native(e))
+
+        command = [self.DOCKER_MACHINE_PATH]
+        command.extend(args)
+        display.debug('Executing command {0}'.format(command))
+        try:
+            result = subprocess.check_output(command)
+        except subprocess.CalledProcessError as e:
+            display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+            raise e
+
+        return to_text(result).strip()
+
+    def _get_docker_daemon_variables(self, machine_name):
+        '''
+        Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
+        the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
+        '''
+        try:
+            env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+        except subprocess.CalledProcessError:
+            # This can happen when the machine is created but provisioning is incomplete
+            return []
+
+        # example output of docker-machine env --shell=sh:
+        # export DOCKER_TLS_VERIFY="1"
+        # export DOCKER_HOST="tcp://134.209.204.160:2376"
+        # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+        # export DOCKER_MACHINE_NAME="routinator"
+        # # Run this command to configure your shell:
+        # # eval $(docker-machine env --shell=bash routinator)
+
+        # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
+        # with the same name and value but with a dm_ name prefix.
+        env_vars = []
+        for line in env_lines:
+            match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
+            if match:
+                env_var_name = match.group(1)
+                env_var_value = match.group(2)
+                env_vars.append((env_var_name, env_var_value))
+
+        return env_vars
+
+    def _get_machine_names(self):
+        # Filter out machines that are not in the Running state, as we probably can't perform any useful
+        # actions on them.
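+        # Roughly equivalent CLI call (illustrative):
+        #   docker-machine ls -q --filter state=Running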
+        ls_command = ['ls', '-q']
+        if self.get_option('running_required'):
+            ls_command.extend(['--filter', 'state=Running'])
+
+        try:
+            ls_lines = self._run_command(ls_command)
+        except subprocess.CalledProcessError:
+            return []
+
+        return ls_lines.splitlines()
+
+    def _inspect_docker_machine_host(self, node):
+        try:
+            inspect_lines = self._run_command(['inspect', node])
+        except subprocess.CalledProcessError:
+            return None
+
+        return json.loads(inspect_lines)
+
+    def _ip_addr_docker_machine_host(self, node):
+        try:
+            ip_addr = self._run_command(['ip', node])
+        except subprocess.CalledProcessError:
+            return None
+
+        return ip_addr
+
+    def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+        if not env_var_tuples:
+            warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
+            if daemon_env in ('require', 'require-silently'):
+                if daemon_env == 'require':
+                    display.warning('{0}: host will be skipped'.format(warning_prefix))
+                return True
+            else:  # 'optional', 'optional-silently'
+                if daemon_env == 'optional':
+                    display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+        return False
+
+    def _populate(self):
+        daemon_env = self.get_option('daemon_env')
+        try:
+            for self.node in self._get_machine_names():
+                self.node_attrs = self._inspect_docker_machine_host(self.node)
+                if not self.node_attrs:
+                    continue
+
+                machine_name = self.node_attrs['Driver']['MachineName']
+
+                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
+                # that could be used to set environment variables to influence a local Docker client:
+                if daemon_env == 'skip':
+                    env_var_tuples = []
+                else:
+                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
+                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
+                        continue
+
+                # add an entry in the inventory for this host
+                self.inventory.add_host(machine_name)
+
+                # check for a valid ip address in the inspect output, else explicitly use the ip command to find the host ip address;
+                # this works around an issue seen with Google Cloud Platform where the IP address was not available
+                # via the 'inspect' subcommand but was via the 'ip' subcommand.
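+                # Abridged, illustrative shape of the `docker-machine inspect` JSON consumed below
+                # (only the keys used by this plugin are shown; values are hypothetical):
+                #   {"Driver": {"MachineName": "mymachine", "IPAddress": "203.0.113.10",
+                #               "SSHPort": 22, "SSHUser": "root", "SSHKeyPath": "/path/to/id_rsa"}}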
+ if self.node_attrs['Driver']['IPAddress']: + ip_addr = self.node_attrs['Driver']['IPAddress'] + else: + ip_addr = self._ip_addr_docker_machine_host(self.node) + + # set standard Ansible remote host connection settings to details captured from `docker-machine` + # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html + self.inventory.set_variable(machine_name, 'ansible_host', ip_addr) + self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort']) + self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser']) + self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath']) + + # set variables based on Docker Machine tags + tags = self.node_attrs['Driver'].get('Tags') or '' + self.inventory.set_variable(machine_name, 'dm_tags', tags) + + # set variables based on Docker Machine env variables + for kv in env_var_tuples: + self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1]) + + if self.get_option('verbose_output'): + self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs) + + # Use constructed if applicable + strict = self.get_option('strict') + + # Composed variables + self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict) + + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict) + + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict) + + except Exception as e: + raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' % + to_native(e), orig_exc=e) + + def verify_file(self, path): + """Return the possibility of a file being consumable by this plugin.""" + return ( + super(InventoryModule, self).verify_file(path) and + path.endswith(('docker_machine.yaml', 'docker_machine.yml'))) + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path, cache) + self._read_config_data(path) + self._populate() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py new file mode 100644 index 00000000..65c5f719 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com> +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: docker_swarm + author: + - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com> + short_description: Ansible dynamic inventory plugin for Docker swarm nodes. + requirements: + - python >= 2.7 + - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 + extends_documentation_fragment: + - constructed + description: + - Reads inventories from the Docker swarm API. 
+        - Uses a YAML configuration file that ends with docker_swarm.(yml|yaml).
+        - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(worker) - all worker nodes;
+          I(manager) - all manager nodes; I(leader) - the swarm leader node;
+          I(nonleaders) - all nodes except the swarm leader."
+    options:
+        plugin:
+            description: The name of this plugin; it should always be set to C(community.docker.docker_swarm)
+                         for this plugin to recognize it as its own.
+            type: str
+            required: true
+            choices: [ docker_swarm, community.docker.docker_swarm ]
+        docker_host:
+            description:
+                - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+                - "Use C(unix://var/run/docker.sock) to connect via the local socket."
+            type: str
+            required: true
+            aliases: [ docker_url ]
+        verbose_output:
+            description: Toggle to (not) include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
+                         C(EngineVersion)).
+            type: bool
+            default: yes
+        tls:
+            description: Connect using TLS without verifying the authenticity of the Docker host server.
+            type: bool
+            default: no
+        validate_certs:
+            description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
+                         host server.
+            type: bool
+            default: no
+            aliases: [ tls_verify ]
+        client_key:
+            description: Path to the client's TLS key file.
+            type: path
+            aliases: [ tls_client_key, key_path ]
+        ca_cert:
+            description: Use a CA certificate when performing server verification by providing the path to a CA
+                         certificate file.
+            type: path
+            aliases: [ tls_ca_cert, cacert_path ]
+        client_cert:
+            description: Path to the client's TLS certificate file.
+            type: path
+            aliases: [ tls_client_cert, cert_path ]
+        tls_hostname:
+            description: When verifying the authenticity of the Docker host server, provide the expected name of
+                         the server.
+            type: str
+        ssl_version:
+            description: Provide a valid SSL version number. The default value is determined by the ssl.py module.
+            type: str
+        api_version:
+            description:
+                - The version of the Docker API running on the Docker Host.
+                - Defaults to the latest version of the API supported by docker-py.
+            type: str
+            aliases: [ docker_api_version ]
+        timeout:
+            description:
+                - The maximum amount of time in seconds to wait on a response from the API.
+                - If the value is not specified in the task, the value of the environment variable C(DOCKER_TIMEOUT)
+                  will be used instead. If the environment variable is not set, the default value will be used.
+            type: int
+            default: 60
+            aliases: [ time_out ]
+        include_host_uri:
+            description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+                         swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without additional
+                         modification as the value of the I(docker_host) option in Docker Swarm modules when connecting via the API.
+                         The port always defaults to C(2376).
+ type: bool + default: no + include_host_uri_port: + description: Override the detected port number included in I(ansible_host_uri) + type: int +''' + +EXAMPLES = ''' +# Minimal example using local docker +plugin: community.docker.docker_swarm +docker_host: unix://var/run/docker.sock + +# Minimal example using remote docker +plugin: community.docker.docker_swarm +docker_host: tcp://my-docker-host:2375 + +# Example using remote docker with unverified TLS +plugin: community.docker.docker_swarm +docker_host: tcp://my-docker-host:2376 +tls: yes + +# Example using remote docker with verified TLS and client certificate verification +plugin: community.docker.docker_swarm +docker_host: tcp://my-docker-host:2376 +validate_certs: yes +ca_cert: /somewhere/ca.pem +client_key: /somewhere/key.pem +client_cert: /somewhere/cert.pem + +# Example using constructed features to create groups and set ansible_host +plugin: community.docker.docker_swarm +docker_host: tcp://my-docker-host:2375 +strict: False +keyed_groups: + # add e.g. x86_64 hosts to an arch_x86_64 group + - prefix: arch + key: 'Description.Platform.Architecture' + # add e.g. linux hosts to an os_linux group + - prefix: os + key: 'Description.Platform.OS' + # create a group per node label + # e.g. a node labeled w/ "production" ends up in group "label_production" + # hint: labels containing special characters will be converted to safe names + - key: 'Spec.Labels' + prefix: label +''' + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible_collections.community.docker.plugins.module_utils.common import update_tls_hostname, get_connect_params +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.parsing.utils.addresses import parse_address + +try: + import docker + HAS_DOCKER = True +except ImportError: + HAS_DOCKER = False + + +class InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using Docker swarm as source. 
''' + + NAME = 'community.docker.docker_swarm' + + def _fail(self, msg): + raise AnsibleError(msg) + + def _populate(self): + raw_params = dict( + docker_host=self.get_option('docker_host'), + tls=self.get_option('tls'), + tls_verify=self.get_option('validate_certs'), + key_path=self.get_option('client_key'), + cacert_path=self.get_option('ca_cert'), + cert_path=self.get_option('client_cert'), + tls_hostname=self.get_option('tls_hostname'), + api_version=self.get_option('api_version'), + timeout=self.get_option('timeout'), + ssl_version=self.get_option('ssl_version'), + debug=None, + ) + update_tls_hostname(raw_params) + connect_params = get_connect_params(raw_params, fail_function=self._fail) + self.client = docker.DockerClient(**connect_params) + self.inventory.add_group('all') + self.inventory.add_group('manager') + self.inventory.add_group('worker') + self.inventory.add_group('leader') + self.inventory.add_group('nonleaders') + + if self.get_option('include_host_uri'): + if self.get_option('include_host_uri_port'): + host_uri_port = str(self.get_option('include_host_uri_port')) + elif self.get_option('tls') or self.get_option('validate_certs'): + host_uri_port = '2376' + else: + host_uri_port = '2375' + + try: + self.nodes = self.client.nodes.list() + for self.node in self.nodes: + self.node_attrs = self.client.nodes.get(self.node.id).attrs + self.inventory.add_host(self.node_attrs['ID']) + self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role']) + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', + self.node_attrs['Status']['Addr']) + if self.get_option('include_host_uri'): + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', + 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port) + if self.get_option('verbose_output'): + self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs) + if 'ManagerStatus' in self.node_attrs: + if self.node_attrs['ManagerStatus'].get('Leader'): + # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0 + # Check moby/moby#35437 for details + swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \ + self.node_attrs['Status']['Addr'] + if self.get_option('include_host_uri'): + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', + 'tcp://' + swarm_leader_ip + ':' + host_uri_port) + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip) + self.inventory.add_host(self.node_attrs['ID'], group='leader') + else: + self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + else: + self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + # Use constructed if applicable + strict = self.get_option('strict') + # Composed variables + self._set_composite_vars(self.get_option('compose'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + except Exception as e: + raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' % + to_native(e)) + + def 
verify_file(self, path):
+        """Return the possibility of a file being consumable by this plugin."""
+        return (
+            super(InventoryModule, self).verify_file(path) and
+            path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+
+    def parse(self, inventory, loader, path, cache=True):
+        if not HAS_DOCKER:
+            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+                               'https://github.com/docker/docker-py.')
+        super(InventoryModule, self).parse(inventory, loader, path, cache)
+        self._read_config_data(path)
+        self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py
new file mode 100644
index 00000000..c828b879
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py
@@ -0,0 +1,1022 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import abc
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+    from requests.exceptions import SSLError
+    from docker import __version__ as docker_version
+    from docker.errors import APIError, NotFound, TLSParameterError
+    from docker.tls import TLSConfig
+    from docker import auth
+
+    if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+        HAS_DOCKER_PY_3 = True
+        from docker import APIClient as Client
+    elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+        HAS_DOCKER_PY_2 = True
+        from docker import APIClient as Client
+    else:
+        from docker import Client
+
+except ImportError as exc:
+    HAS_DOCKER_ERROR = str(exc)
+    HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+    # docker (Docker SDK for Python >= 2.0.0)
+    import docker.models  # noqa: F401
+    HAS_DOCKER_MODELS = True
+except ImportError:
+    HAS_DOCKER_MODELS = False
+
+try:
+    # docker-py (Docker SDK for Python < 2.0.0)
+    import docker.ssladapter  # noqa: F401
+    HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+    HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+    from requests.exceptions import RequestException
+except ImportError:
+    # Either docker-py is no longer using requests, or docker-py isn't around either,
+    # or docker-py's dependency requests is missing.
In any case, define an exception + # class RequestException so that our code doesn't break. + class RequestException(Exception): + pass + + +DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' +DEFAULT_TLS = False +DEFAULT_TLS_VERIFY = False +DEFAULT_TLS_HOSTNAME = 'localhost' +MIN_DOCKER_VERSION = "1.8.0" +DEFAULT_TIMEOUT_SECONDS = 60 + +DOCKER_COMMON_ARGS = dict( + docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']), + tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])), + api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']), + timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])), + ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']), + client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']), + client_key=dict(type='path', aliases=['tls_client_key', 'key_path']), + ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])), + tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])), + validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']), + debug=dict(type='bool', default=False) +) + +DOCKER_MUTUALLY_EXCLUSIVE = [] + +DOCKER_REQUIRED_TOGETHER = [ + ['client_cert', 'client_key'] +] + +DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/' +EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+' +BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + + +if not HAS_DOCKER_PY: + docker_version = None + + # No Docker SDK for Python. Create a place holder client to allow + # instantiation of AnsibleModule and proper error handing + class Client(object): # noqa: F811 + def __init__(self, **kwargs): + pass + + class APIError(Exception): # noqa: F811 + pass + + class NotFound(Exception): # noqa: F811 + pass + + +def is_image_name_id(name): + """Check whether the given image name is in fact an image ID (hash).""" + if re.match('^sha256:[0-9a-fA-F]{64}$', name): + return True + return False + + +def is_valid_tag(tag, allow_empty=False): + """Check whether the given string is a valid docker tag name.""" + if not tag: + return allow_empty + # See here ("Extended description") for a definition what tags can be: + # https://docs.docker.com/engine/reference/commandline/tag/ + return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag)) + + +def sanitize_result(data): + """Sanitize data object for return to Ansible. + + When the data object contains types such as docker.types.containers.HostConfig, + Ansible will fail when these are returned via exit_json or fail_json. + HostConfig is derived from dict, but its constructor requires additional + arguments. This function sanitizes data structures by recursively converting + everything derived from dict to dict and everything derived from list (and tuple) + to a list. 
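+
+    For example (illustrative), sanitize_result({'a': (1, 2)}) returns {'a': [1, 2]}.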
+    """
+    if isinstance(data, dict):
+        return dict((k, sanitize_result(v)) for k, v in data.items())
+    elif isinstance(data, (list, tuple)):
+        return [sanitize_result(v) for v in data]
+    else:
+        return data
+
+
+class DockerBaseClass(object):
+    def __init__(self):
+        self.debug = False
+
+    def log(self, msg, pretty_print=False):
+        pass
+        # if self.debug:
+        #     log_file = open('docker.log', 'a')
+        #     if pretty_print:
+        #         log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+        #         log_file.write(u'\n')
+        #     else:
+        #         log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+    if result['tls_hostname'] is None:
+        # get default machine name from the url
+        parsed_url = urlparse(result['docker_host'])
+        if ':' in parsed_url.netloc:
+            result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+        else:
+            result['tls_hostname'] = parsed_url.netloc
+
+
+def _get_tls_config(fail_function, **kwargs):
+    try:
+        tls_config = TLSConfig(**kwargs)
+        return tls_config
+    except TLSParameterError as exc:
+        fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+    if auth['tls'] or auth['tls_verify']:
+        auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+    if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+        # TLS with certs and host verification
+        if auth['cacert_path']:
+            tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                         ca_cert=auth['cacert_path'],
+                                         verify=True,
+                                         assert_hostname=auth['tls_hostname'],
+                                         ssl_version=auth['ssl_version'],
+                                         fail_function=fail_function)
+        else:
+            tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                         verify=True,
+                                         assert_hostname=auth['tls_hostname'],
+                                         ssl_version=auth['ssl_version'],
+                                         fail_function=fail_function)
+
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls_verify'] and auth['cacert_path']:
+        # TLS with cacert only
+        tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+                                     assert_hostname=auth['tls_hostname'],
+                                     verify=True,
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls_verify']:
+        # TLS with verify and no certs
+        tls_config = _get_tls_config(verify=True,
+                                     assert_hostname=auth['tls_hostname'],
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls'] and auth['cert_path'] and auth['key_path']:
+        # TLS with certs and no host verification
+        tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                     verify=False,
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls']:
+        # TLS with no certs and no host verification
+        tls_config = _get_tls_config(verify=False,
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    # No TLS
+    return dict(base_url=auth['docker_host'],
+                version=auth['api_version'],
+                timeout=auth['timeout'])
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip
install docker`." +DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade." +DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. " + "Hint: if you do not need Python 2.6 support, try " + "`pip uninstall docker-py` instead, followed by `pip install docker`.") + + +class AnsibleDockerClientBase(Client): + def __init__(self, min_docker_version=None, min_docker_api_version=None): + if min_docker_version is None: + min_docker_version = MIN_DOCKER_VERSION + NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0')) + + self.docker_py_version = LooseVersion(docker_version) + + if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER: + self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker " + "SDK for Python) installed together as they use the same namespace and cause a corrupt " + "installation. Please uninstall both packages, and re-install only the docker-py or docker " + "python module (for %s's Python %s). It is recommended to install the docker module if no " + "support for Python 2.6 is required. Please note that simply uninstalling one of the modules " + "can leave the other module in a broken state." % (platform.node(), sys.executable)) + + if not HAS_DOCKER_PY: + if NEEDS_DOCKER_PY2: + msg = missing_required_lib("Docker SDK for Python: docker") + msg = msg + ", for example via `pip install docker`. The error was: %s" + else: + msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)") + msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s" + self.fail(msg % HAS_DOCKER_ERROR) + + if self.docker_py_version < LooseVersion(min_docker_version): + msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s." + if not NEEDS_DOCKER_PY2: + # The minimal required version is < 2.0 (and the current version as well). + # Advertise docker (instead of docker-py) for non-Python-2.6 users. + msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER + elif docker_version < LooseVersion('2.0'): + msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER + else: + msg += DOCKERPYUPGRADE_UPGRADE_DOCKER + self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version)) + + self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail) + + try: + super(AnsibleDockerClientBase, self).__init__(**self._connect_params) + self.docker_api_version_str = self.version()['ApiVersion'] + except APIError as exc: + self.fail("Docker API error: %s" % exc) + except Exception as exc: + self.fail("Error connecting: %s" % exc) + + self.docker_api_version = LooseVersion(self.docker_api_version_str) + if min_docker_api_version is not None: + if self.docker_api_version < LooseVersion(min_docker_api_version): + self.fail('Docker API version is %s. Minimum version required is %s.' 
% (self.docker_api_version_str, min_docker_api_version))
+
+    def log(self, msg, pretty_print=False):
+        pass
+        # if self.debug:
+        #     log_file = open('docker.log', 'a')
+        #     if pretty_print:
+        #         log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+        #         log_file.write(u'\n')
+        #     else:
+        #         log_file.write(msg + u'\n')
+
+    @abc.abstractmethod
+    def fail(self, msg, **kwargs):
+        pass
+
+    @staticmethod
+    def _get_value(param_name, param_value, env_variable, default_value):
+        if param_value is not None:
+            # take module parameter value
+            if param_value in BOOLEANS_TRUE:
+                return True
+            if param_value in BOOLEANS_FALSE:
+                return False
+            return param_value
+
+        if env_variable is not None:
+            env_value = os.environ.get(env_variable)
+            if env_value is not None:
+                # take the env variable value
+                if param_name == 'cert_path':
+                    return os.path.join(env_value, 'cert.pem')
+                if param_name == 'cacert_path':
+                    return os.path.join(env_value, 'ca.pem')
+                if param_name == 'key_path':
+                    return os.path.join(env_value, 'key.pem')
+                if env_value in BOOLEANS_TRUE:
+                    return True
+                if env_value in BOOLEANS_FALSE:
+                    return False
+                return env_value
+
+        # take the default
+        return default_value
+
+    @abc.abstractmethod
+    def _get_params(self):
+        pass
+
+    @property
+    def auth_params(self):
+        # Get authentication credentials.
+        # Precedence: module parameters -> environment variables -> defaults.
+
+        self.log('Getting credentials')
+
+        client_params = self._get_params()
+
+        params = dict()
+        for key in DOCKER_COMMON_ARGS:
+            params[key] = client_params.get(key)
+
+        result = dict(
+            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+                                        DEFAULT_DOCKER_HOST),
+            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+                                         'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+                                        'auto'),
+            cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+            cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+            key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+            ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+                                       DEFAULT_TLS_VERIFY),
+            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+                                    DEFAULT_TIMEOUT_SECONDS),
+        )
+
+        update_tls_hostname(result)
+
+        return result
+
+    def _handle_ssl_error(self, error):
+        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+        if match:
+            self.fail("You asked for verification that the Docker daemon certificate's hostname matches %s. "
+                      "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+                      "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+                      "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1))) + self.fail("SSL Exception: %s" % (error)) + + def get_container_by_id(self, container_id): + try: + self.log("Inspecting container Id %s" % container_id) + result = self.inspect_container(container=container_id) + self.log("Completed container inspection") + return result + except NotFound as dummy: + return None + except Exception as exc: + self.fail("Error inspecting container: %s" % exc) + + def get_container(self, name=None): + ''' + Lookup a container and return the inspection results. + ''' + if name is None: + return None + + search_name = name + if not name.startswith('/'): + search_name = '/' + name + + result = None + try: + for container in self.containers(all=True): + self.log("testing container: %s" % (container['Names'])) + if isinstance(container['Names'], list) and search_name in container['Names']: + result = container + break + if container['Id'].startswith(name): + result = container + break + if container['Id'] == name: + result = container + break + except SSLError as exc: + self._handle_ssl_error(exc) + except Exception as exc: + self.fail("Error retrieving container list: %s" % exc) + + if result is None: + return None + + return self.get_container_by_id(result['Id']) + + def get_network(self, name=None, network_id=None): + ''' + Lookup a network and return the inspection results. + ''' + if name is None and network_id is None: + return None + + result = None + + if network_id is None: + try: + for network in self.networks(): + self.log("testing network: %s" % (network['Name'])) + if name == network['Name']: + result = network + break + if network['Id'].startswith(name): + result = network + break + except SSLError as exc: + self._handle_ssl_error(exc) + except Exception as exc: + self.fail("Error retrieving network list: %s" % exc) + + if result is not None: + network_id = result['Id'] + + if network_id is not None: + try: + self.log("Inspecting network Id %s" % network_id) + result = self.inspect_network(network_id) + self.log("Completed network inspection") + except NotFound as dummy: + return None + except Exception as exc: + self.fail("Error inspecting network: %s" % exc) + + return result + + def find_image(self, name, tag): + ''' + Lookup an image (by name and tag) and return the inspection results. 
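+        Returns the inspection results as a dict, or None if the image cannot be found.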
+ ''' + if not name: + return None + + self.log("Find image %s:%s" % (name, tag)) + images = self._image_lookup(name, tag) + if not images: + # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub + registry, repo_name = auth.resolve_repository_name(name) + if registry == 'docker.io': + # If docker.io is explicitly there in name, the image + # isn't found in some cases (#41509) + self.log("Check for docker.io image: %s" % repo_name) + images = self._image_lookup(repo_name, tag) + if not images and repo_name.startswith('library/'): + # Sometimes library/xxx images are not found + lookup = repo_name[len('library/'):] + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + if not images: + # Last case: if docker.io wasn't there, it can be that + # the image wasn't found either (#15586) + lookup = "%s/%s" % (registry, repo_name) + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + + if len(images) > 1: + self.fail("Registry returned more than one result for %s:%s" % (name, tag)) + + if len(images) == 1: + try: + inspection = self.inspect_image(images[0]['Id']) + except Exception as exc: + self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc))) + return inspection + + self.log("Image %s:%s not found." % (name, tag)) + return None + + def find_image_by_id(self, image_id): + ''' + Lookup an image (by ID) and return the inspection results. + ''' + if not image_id: + return None + + self.log("Find image %s (by ID)" % image_id) + try: + inspection = self.inspect_image(image_id) + except Exception as exc: + self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) + return inspection + + def _image_lookup(self, name, tag): + ''' + Including a tag in the name parameter sent to the Docker SDK for Python images method + does not work consistently. Instead, get the result set for name and manually check + if the tag exists. + ''' + try: + response = self.images(name=name) + except Exception as exc: + self.fail("Error searching for image %s - %s" % (name, str(exc))) + images = response + if tag: + lookup = "%s:%s" % (name, tag) + lookup_digest = "%s@%s" % (name, tag) + images = [] + for image in response: + tags = image.get('RepoTags') + digests = image.get('RepoDigests') + if (tags and lookup in tags) or (digests and lookup_digest in digests): + images = [image] + break + return images + + def pull_image(self, name, tag="latest"): + ''' + Pull an image + ''' + self.log("Pulling image %s:%s" % (name, tag)) + old_tag = self.find_image(name, tag) + try: + for line in self.pull(name, tag=tag, stream=True, decode=True): + self.log(line, pretty_print=True) + if line.get('error'): + if line.get('errorDetail'): + error_detail = line.get('errorDetail') + self.fail("Error pulling %s - code: %s message: %s" % (name, + error_detail.get('code'), + error_detail.get('message'))) + else: + self.fail("Error pulling %s - %s" % (name, line.get('error'))) + except Exception as exc: + self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc))) + + new_tag = self.find_image(name, tag) + + return new_tag, old_tag == new_tag + + def inspect_distribution(self, image, **kwargs): + ''' + Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0 + since prior versions did not support accessing private repositories. 
+ ''' + if self.docker_py_version < LooseVersion('4.0.0'): + registry = auth.resolve_repository_name(image)[0] + header = auth.get_config_header(self, registry) + if header: + return self._result(self._get( + self._url('/distribution/{0}/json', image), + headers={'X-Registry-Auth': header} + ), json=True) + return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs) + + +class AnsibleDockerClient(AnsibleDockerClientBase): + + def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, + required_together=None, required_if=None, min_docker_version=None, + min_docker_api_version=None, option_minimal_versions=None, + option_minimal_versions_ignore_params=None, fail_results=None): + + # Modules can put information in here which will always be returned + # in case client.fail() is called. + self.fail_results = fail_results or {} + + merged_arg_spec = dict() + merged_arg_spec.update(DOCKER_COMMON_ARGS) + if argument_spec: + merged_arg_spec.update(argument_spec) + self.arg_spec = merged_arg_spec + + mutually_exclusive_params = [] + mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE + if mutually_exclusive: + mutually_exclusive_params += mutually_exclusive + + required_together_params = [] + required_together_params += DOCKER_REQUIRED_TOGETHER + if required_together: + required_together_params += required_together + + self.module = AnsibleModule( + argument_spec=merged_arg_spec, + supports_check_mode=supports_check_mode, + mutually_exclusive=mutually_exclusive_params, + required_together=required_together_params, + required_if=required_if) + + self.debug = self.module.params.get('debug') + self.check_mode = self.module.check_mode + + super(AnsibleDockerClient, self).__init__( + min_docker_version=min_docker_version, + min_docker_api_version=min_docker_api_version) + + if option_minimal_versions is not None: + self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params) + + def fail(self, msg, **kwargs): + self.fail_results.update(kwargs) + self.module.fail_json(msg=msg, **sanitize_result(self.fail_results)) + + def _get_params(self): + return self.module.params + + def _get_minimal_versions(self, option_minimal_versions, ignore_params=None): + self.option_minimal_versions = dict() + for option in self.module.argument_spec: + if ignore_params is not None: + if option in ignore_params: + continue + self.option_minimal_versions[option] = dict() + self.option_minimal_versions.update(option_minimal_versions) + + for option, data in self.option_minimal_versions.items(): + # Test whether option is supported, and store result + support_docker_py = True + support_docker_api = True + if 'docker_py_version' in data: + support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version']) + if 'docker_api_version' in data: + support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version']) + data['supported'] = support_docker_py and support_docker_api + # Fail if option is not supported but used + if not data['supported']: + # Test whether option is specified + if 'detect_usage' in data: + used = data['detect_usage'](self) + else: + used = self.module.params.get(option) is not None + if used and 'default' in self.module.argument_spec[option]: + used = self.module.params[option] != self.module.argument_spec[option]['default'] + if used: + # If the option is used, compose error message. 
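+                    # For example, with a hypothetical usage_msg='set the XYZ option', the composed
+                    # failure message reads:
+                    #   "Docker API version is 1.38. Minimum version required is 1.41 to set the XYZ option."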
+ if 'usage_msg' in data: + usg = data['usage_msg'] + else: + usg = 'set %s option' % (option, ) + if not support_docker_api: + msg = 'Docker API version is %s. Minimum version required is %s to %s.' + msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg) + elif not support_docker_py: + msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. " + if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'): + msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER + elif self.docker_py_version < LooseVersion('2.0.0'): + msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER + else: + msg += DOCKERPYUPGRADE_UPGRADE_DOCKER + msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg) + else: + # should not happen + msg = 'Cannot %s with your configuration.' % (usg, ) + self.fail(msg) + + def report_warnings(self, result, warnings_key=None): + ''' + Checks result of client operation for warnings, and if present, outputs them. + + warnings_key should be a list of keys used to crawl the result dictionary. + For example, if warnings_key == ['a', 'b'], the function will consider + result['a']['b'] if these keys exist. If the result is a non-empty string, it + will be reported as a warning. If the result is a list, every entry will be + reported as a warning. + + In most cases (if warnings are returned at all), warnings_key should be + ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings']. + ''' + if warnings_key is None: + warnings_key = ['Warnings'] + for key in warnings_key: + if not isinstance(result, Mapping): + return + result = result.get(key) + if isinstance(result, Sequence): + for warning in result: + self.module.warn('Docker warning: {0}'.format(warning)) + elif isinstance(result, string_types) and result: + self.module.warn('Docker warning: {0}'.format(result)) + + +def compare_dict_allow_more_present(av, bv): + ''' + Compare two dictionaries for whether every entry of the first is in the second. + ''' + for key, value in av.items(): + if key not in bv: + return False + if bv[key] != value: + return False + return True + + +def compare_generic(a, b, method, datatype): + ''' + Compare values a and b as described by method and datatype. + + Returns ``True`` if the values compare equal, and ``False`` if not. + + ``a`` is usually the module's parameter, while ``b`` is a property + of the current object. ``a`` must not be ``None`` (except for + ``datatype == 'value'``). + + Valid values for ``method`` are: + - ``ignore`` (always compare as equal); + - ``strict`` (only compare if really equal) + - ``allow_more_present`` (allow b to have elements which a does not have). + + Valid values for ``datatype`` are: + - ``value``: for simple values (strings, numbers, ...); + - ``list``: for ``list``s or ``tuple``s where order matters; + - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not + matter; + - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does + not matter and which contain ``dict``s; ``allow_more_present`` is used + for the ``dict``s, and these are assumed to be dictionaries of values; + - ``dict``: for dictionaries of values. 
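+
+    For example, compare_generic(['a'], ['a', 'b'], 'allow_more_present', 'set') returns
+    ``True``, while compare_generic(['a', 'b'], ['b', 'a'], 'strict', 'list') returns
+    ``False`` because order matters for ``list``.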
+ ''' + if method == 'ignore': + return True + # If a or b is None: + if a is None or b is None: + # If both are None: equality + if a == b: + return True + # Otherwise, not equal for values, and equal + # if the other is empty for set/list/dict + if datatype == 'value': + return False + # For allow_more_present, allow a to be None + if method == 'allow_more_present' and a is None: + return True + # Otherwise, the iterable object which is not None must have length 0 + return len(b if a is None else a) == 0 + # Do proper comparison (both objects not None) + if datatype == 'value': + return a == b + elif datatype == 'list': + if method == 'strict': + return a == b + else: + i = 0 + for v in a: + while i < len(b) and b[i] != v: + i += 1 + if i == len(b): + return False + i += 1 + return True + elif datatype == 'dict': + if method == 'strict': + return a == b + else: + return compare_dict_allow_more_present(a, b) + elif datatype == 'set': + set_a = set(a) + set_b = set(b) + if method == 'strict': + return set_a == set_b + else: + return set_b >= set_a + elif datatype == 'set(dict)': + for av in a: + found = False + for bv in b: + if compare_dict_allow_more_present(av, bv): + found = True + break + if not found: + return False + if method == 'strict': + # If we would know that both a and b do not contain duplicates, + # we could simply compare len(a) to len(b) to finish this test. + # We can assume that b has no duplicates (as it is returned by + # docker), but we don't know for a. + for bv in b: + found = False + for av in a: + if compare_dict_allow_more_present(av, bv): + found = True + break + if not found: + return False + return True + + +class DifferenceTracker(object): + def __init__(self): + self._diff = [] + + def add(self, name, parameter=None, active=None): + self._diff.append(dict( + name=name, + parameter=parameter, + active=active, + )) + + def merge(self, other_tracker): + self._diff.extend(other_tracker._diff) + + @property + def empty(self): + return len(self._diff) == 0 + + def get_before_after(self): + ''' + Return texts ``before`` and ``after``. + ''' + before = dict() + after = dict() + for item in self._diff: + before[item['name']] = item['active'] + after[item['name']] = item['parameter'] + return before, after + + def has_difference_for(self, name): + ''' + Returns a boolean if a difference exists for name + ''' + return any(diff for diff in self._diff if diff['name'] == name) + + def get_legacy_docker_container_diffs(self): + ''' + Return differences in the docker_container legacy format. + ''' + result = [] + for entry in self._diff: + item = dict() + item[entry['name']] = dict( + parameter=entry['parameter'], + container=entry['active'], + ) + result.append(item) + return result + + def get_legacy_docker_diffs(self): + ''' + Return differences in the docker_container legacy format. + ''' + result = [entry['name'] for entry in self._diff] + return result + + +def clean_dict_booleans_for_docker_api(data): + ''' + Go doesn't like Python booleans 'True' or 'False', while Ansible is just + fine with them in YAML. As such, they need to be converted in cases where + we pass dictionaries to the Docker API (e.g. docker_network's + driver_options and docker_prune's filters). + ''' + result = dict() + if data is not None: + for k, v in data.items(): + if v is True: + v = 'true' + elif v is False: + v = 'false' + else: + v = str(v) + result[str(k)] = v + return result + + +def convert_duration_to_nanosecond(time_str): + """ + Return time duration in nanosecond. 
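+
+    For example, '1h30m' yields 5400000000000 (5400 seconds expressed in nanoseconds).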
+ """ + if not isinstance(time_str, str): + raise ValueError('Missing unit in duration - %s' % time_str) + + regex = re.compile( + r'^(((?P<hours>\d+)h)?' + r'((?P<minutes>\d+)m(?!s))?' + r'((?P<seconds>\d+)s)?' + r'((?P<milliseconds>\d+)ms)?' + r'((?P<microseconds>\d+)us)?)$' + ) + parts = regex.match(time_str) + + if not parts: + raise ValueError('Invalid time duration - %s' % time_str) + + parts = parts.groupdict() + time_params = {} + for (name, value) in parts.items(): + if value: + time_params[name] = int(value) + + delta = timedelta(**time_params) + time_in_nanoseconds = ( + delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6 + ) * 10 ** 3 + + return time_in_nanoseconds + + +def parse_healthcheck(healthcheck): + """ + Return dictionary of healthcheck parameters and boolean if + healthcheck defined in image was requested to be disabled. + """ + if (not healthcheck) or (not healthcheck.get('test')): + return None, None + + result = dict() + + # All supported healthcheck parameters + options = dict( + test='test', + interval='interval', + timeout='timeout', + start_period='start_period', + retries='retries' + ) + + duration_options = ['interval', 'timeout', 'start_period'] + + for (key, value) in options.items(): + if value in healthcheck: + if healthcheck.get(value) is None: + # due to recursive argument_spec, all keys are always present + # (but have default value None if not specified) + continue + if value in duration_options: + time = convert_duration_to_nanosecond(healthcheck.get(value)) + if time: + result[key] = time + elif healthcheck.get(value): + result[key] = healthcheck.get(value) + if key == 'test': + if isinstance(result[key], (tuple, list)): + result[key] = [str(e) for e in result[key]] + else: + result[key] = ['CMD-SHELL', str(result[key])] + elif key == 'retries': + try: + result[key] = int(result[key]) + except ValueError: + raise ValueError( + 'Cannot parse number of retries for healthcheck. ' + 'Expected an integer, got "{0}".'.format(result[key]) + ) + + if result['test'] == ['NONE']: + # If the user explicitly disables the healthcheck, return None + # as the healthcheck object, and set disable_healthcheck to True + return None, True + + return result, False + + +def omit_none_from_dict(d): + """ + Return a copy of the dictionary with all keys with value None omitted. 
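+
+    For example, omit_none_from_dict({'a': 1, 'b': None}) returns {'a': 1}.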
+ """ + return dict((k, v) for (k, v) in d.items() if v is not None) diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py new file mode 100644 index 00000000..04e34cc7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py @@ -0,0 +1,280 @@ +# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl> +# (c) Thierry Bouvet (@tbouvet) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import json +from time import sleep + +try: + from docker.errors import APIError, NotFound +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible.module_utils._text import to_native +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + LooseVersion, +) + + +class AnsibleDockerSwarmClient(AnsibleDockerClient): + + def __init__(self, **kwargs): + super(AnsibleDockerSwarmClient, self).__init__(**kwargs) + + def get_swarm_node_id(self): + """ + Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID + of Docker host the module is executed on + :return: + NodeID of host or 'None' if not part of Swarm + """ + + try: + info = self.info() + except APIError as exc: + self.fail("Failed to get node information for %s" % to_native(exc)) + + if info: + json_str = json.dumps(info, ensure_ascii=False) + swarm_info = json.loads(json_str) + if swarm_info['Swarm']['NodeID']: + return swarm_info['Swarm']['NodeID'] + return None + + def check_if_swarm_node(self, node_id=None): + """ + Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host + system information looking if specific key in output exists. If 'node_id' is provided then it tries to + read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if + it is not executed on Swarm manager + + :param node_id: Node identifier + :return: + bool: True if node is part of Swarm, False otherwise + """ + + if node_id is None: + try: + info = self.info() + except APIError: + self.fail("Failed to get host information.") + + if info: + json_str = json.dumps(info, ensure_ascii=False) + swarm_info = json.loads(json_str) + if swarm_info['Swarm']['NodeID']: + return True + if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'): + return True + return False + else: + try: + node_info = self.get_node_inspect(node_id=node_id) + except APIError: + return + + if node_info['ID'] is not None: + return True + return False + + def check_if_swarm_manager(self): + """ + Checks if node role is set as Manager in Swarm. The node is the docker host on which module action + is performed. 
The inspect_swarm() call will fail if the node is not a manager.
+
+        :return: True if node is Swarm Manager, False otherwise
+        """
+
+        try:
+            self.inspect_swarm()
+            return True
+        except APIError:
+            return False
+
+    def fail_task_if_not_swarm_manager(self):
+        """
+        If the host is not a swarm manager, the Ansible task on this host should end in the 'failed' state.
+        """
+        if not self.check_if_swarm_manager():
+            self.fail("Error running docker swarm module: must run on swarm manager node")
+
+    def check_if_swarm_worker(self):
+        """
+        Checks if the node role is set as Worker in Swarm. The node is the docker host on which the module action
+        is performed. Fails if run on a host that is not part of Swarm (via check_if_swarm_node()).
+
+        :return: True if node is Swarm Worker, False otherwise
+        """
+
+        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+            return True
+        return False
+
+    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+        """
+        Checks if the node status on the Swarm manager is 'down'. If node_id is provided, it queries the manager
+        about the node specified in the parameter; otherwise it queries the manager about itself. If run on a
+        Swarm worker node or a host that is not part of Swarm, it will fail the playbook.
+
+        :param repeat_check: number of check attempts with a 5 second delay between them; by default check only once
+        :param node_id: node ID or name; if None then the method will try to get the node_id of the host the module runs on
+        :return:
+            True if the node is part of the swarm but its state is down, False otherwise
+        """
+
+        if repeat_check < 1:
+            repeat_check = 1
+
+        if node_id is None:
+            node_id = self.get_swarm_node_id()
+
+        for retry in range(0, repeat_check):
+            if retry > 0:
+                sleep(5)
+            node_info = self.get_node_inspect(node_id=node_id)
+            if node_info['Status']['State'] == 'down':
+                return True
+        return False
+
+    def get_node_inspect(self, node_id=None, skip_missing=False):
+        """
+        Returns Swarm node info, as in the 'docker node inspect' command, about a single node.
+
+        :param skip_missing: if True then the function will return None instead of failing the task
+        :param node_id: node ID or name; if None then the method will try to get the node_id of the host the module runs on
+        :return:
+            Single node information structure
+        """
+
+        if node_id is None:
+            node_id = self.get_swarm_node_id()
+
+        if node_id is None:
+            self.fail("Failed to get node information.")
+
+        try:
+            node_info = self.inspect_node(node_id=node_id)
+        except APIError as exc:
+            if exc.status_code == 503:
+                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+            if exc.status_code == 404:
+                if skip_missing:
+                    return None
+            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+        except Exception as exc:
+            self.fail("Error inspecting swarm node: %s" % exc)
+
+        json_str = json.dumps(node_info, ensure_ascii=False)
+        node_info = json.loads(json_str)
+
+        if 'ManagerStatus' in node_info:
+            if node_info['ManagerStatus'].get('Leader'):
+                # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0.
+                # Check moby/moby#35437 for details.
+                count_colons = node_info['ManagerStatus']['Addr'].count(":")
+                if count_colons == 1:
+                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+                else:
+                    swarm_leader_ip = node_info['Status']['Addr']
+                node_info['Status']['Addr'] = swarm_leader_ip
+        return node_info
+
+    def get_all_nodes_inspect(self):
+        """
+        Returns Swarm node info, as in the 'docker node inspect' command, about all registered nodes.
+
+        :return:
+            Structure with information about all
+    def get_all_nodes_inspect(self):
+        """
+        Return Swarm node info for all registered nodes, as shown by the 'docker node inspect' command.
+
+        :return:
+            Structure with information about all nodes
+        """
+        try:
+            node_info = self.nodes()
+        except APIError as exc:
+            if exc.status_code == 503:
+                self.fail("Cannot inspect nodes: to inspect nodes, execute the module on a Swarm manager")
+            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+        except Exception as exc:
+            self.fail("Error inspecting swarm node: %s" % exc)
+
+        json_str = json.dumps(node_info, ensure_ascii=False)
+        node_info = json.loads(json_str)
+        return node_info
+
+    def get_all_nodes_list(self, output='short'):
+        """
+        Return the list of nodes registered in the Swarm.
+
+        :param output: Defines the format of the returned data
+        :return:
+            If 'output' is 'short', the returned data is a list of the hostnames of the nodes
+            registered in the Swarm; if 'output' is 'long', it is a list of dicts containing the
+            attributes as in the output of the 'docker node ls' command
+        """
+        nodes_list = []
+
+        nodes_inspect = self.get_all_nodes_inspect()
+        if nodes_inspect is None:
+            return None
+
+        if output == 'short':
+            for node in nodes_inspect:
+                nodes_list.append(node['Description']['Hostname'])
+        elif output == 'long':
+            for node in nodes_inspect:
+                node_property = {}
+
+                node_property.update({'ID': node['ID']})
+                node_property.update({'Hostname': node['Description']['Hostname']})
+                node_property.update({'Status': node['Status']['State']})
+                node_property.update({'Availability': node['Spec']['Availability']})
+                if 'ManagerStatus' in node:
+                    if node['ManagerStatus']['Leader'] is True:
+                        node_property.update({'Leader': True})
+                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+                nodes_list.append(node_property)
+        else:
+            return None
+
+        return nodes_list
+
+    def get_node_name_by_id(self, nodeid):
+        return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+    def get_unlock_key(self):
+        if self.docker_py_version < LooseVersion('2.7.0'):
+            return None
+        return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+    def get_service_inspect(self, service_id, skip_missing=False):
+        """
+        Return Swarm service info for a single service, as shown by the 'docker service inspect' command.
+
+        :param service_id: service ID or name
+        :param skip_missing: if True, the method returns None instead of failing the task
+        :return:
+            Single service information structure
+        """
+        try:
+            service_info = self.inspect_service(service_id)
+        except NotFound as exc:
+            if skip_missing is False:
+                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+            else:
+                return None
+        except APIError as exc:
+            if exc.status_code == 503:
+                self.fail("Cannot inspect service: to inspect a service, execute the module on a Swarm manager")
+            self.fail("Error inspecting swarm service: %s" % exc)
+        except Exception as exc:
+            self.fail("Error inspecting swarm service: %s" % exc)
+
+        json_str = json.dumps(service_info, ensure_ascii=False)
+        service_info = json.loads(json_str)
+        return service_info
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py
new file mode 100644
index 00000000..926c6ee4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+#
+# (c) 2020 Matt Clay <mclay@redhat.com>
+# (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: current_container_facts +short_description: Return facts about whether the module runs in a Docker container +version_added: 1.1.0 +description: + - Return facts about whether the module runs in a Docker container. +author: + - Felix Fontein (@felixfontein) +''' + +EXAMPLES = ''' +- name: Get facts on current container + community.docker.current_container_facts: + +- name: Print information on current container when running in a container + ansible.builtin.debug: + msg: "Container ID is {{ ansible_module_container_id }}" + when: ansible_module_running_in_container +''' + +RETURN = ''' +ansible_facts: + description: Ansible facts returned by the module + type: dict + returned: always + contains: + ansible_module_running_in_container: + description: + - Whether the module was able to detect that it runs in a container or not. + returned: always + type: bool + ansible_module_container_id: + description: + - The detected container ID. + - Contains an empty string if no container was detected. + returned: always + type: str + ansible_module_container_type: + description: + - The detected container environment. + - Contains an empty string if no container was detected. + - Otherwise, will be one of C(docker) or C(azure_pipelines). + returned: always + type: str + # choices: + # - docker + # - azure_pipelines +''' + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule(dict(), supports_check_mode=True) + + path = '/proc/self/cpuset' + container_id = '' + container_type = '' + + if os.path.exists(path): + # File content varies based on the environment: + # No Container: / + # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507 + # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891 + # Podman: /../../../../../.. + with open(path, 'rb') as f: + contents = f.read().decode('utf-8') + + cgroup_path, cgroup_name = os.path.split(contents.strip()) + + if cgroup_path == '/docker': + container_id = cgroup_name + container_type = 'docker' + + if cgroup_path == '/azpl_job': + container_id = cgroup_name + container_type = 'azure_pipelines' + + module.exit_json(ansible_facts=dict( + ansible_module_running_in_container=container_id != '', + ansible_module_container_id=container_id, + ansible_module_container_type=container_type, + )) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py new file mode 100644 index 00000000..e8b8532c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py @@ -0,0 +1,1148 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: docker_compose + +short_description: Manage multi-container Docker applications with Docker Compose. + + +author: "Chris Houseknecht (@chouseknecht)" + +description: + - Uses Docker Compose to start, shutdown and scale services. + - Works with compose versions 1 and 2. 
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option. + - See the examples for more details. + - Supports check mode. + - This module was called C(docker_service) before Ansible 2.8. The usage did not change. + +options: + project_src: + description: + - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file. + - Mutually exclusive with I(definition). + - Required when no I(definition) is provided. + type: path + project_name: + description: + - Provide a project name. If not provided, the project name is taken from the basename of I(project_src). + - Required when I(definition) is provided. + type: str + files: + description: + - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml). + - Files are loaded and merged in the order given. + type: list + elements: path + state: + description: + - Desired state of the project. + - Specifying C(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) + (with I(restarted)). + - Specifying C(absent) is the same as running C(docker-compose down). + type: str + default: present + choices: + - absent + - present + services: + description: + - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted)) + on a subset of services. + - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)). + type: list + elements: str + scale: + description: + - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key + is the name of the service and the value is an integer count for the number of containers. + type: dict + dependencies: + description: + - When I(state) is C(present) specify whether or not to include linked services. + type: bool + default: yes + definition: + description: + - Compose file describing one or more services, networks and volumes. + - Mutually exclusive with I(project_src) and I(files). + type: dict + hostname_check: + description: + - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate. + type: bool + default: no + recreate: + description: + - By default containers will be recreated when their configuration differs from the service definition. + - Setting to C(never) ignores configuration differences and leaves existing containers unchanged. + - Setting to C(always) forces recreation of all existing containers. + type: str + default: smart + choices: + - always + - never + - smart + build: + description: + - Use with I(state) C(present) to always build images prior to starting the application. + - Same as running C(docker-compose build) with the pull option. + - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. + - Use the I(nocache) option to ignore the image cache when performing the build. + - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never). + type: bool + default: no + pull: + description: + - Use with I(state) C(present) to always pull images prior to starting the application. + - Same as running C(docker-compose pull). + - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never). 
+ type: bool + default: no + nocache: + description: + - Use with the I(build) option to ignore the cache during the image build process. + type: bool + default: no + remove_images: + description: + - Use with I(state) C(absent) to remove all images or only local images. + type: str + choices: + - 'all' + - 'local' + remove_volumes: + description: + - Use with I(state) C(absent) to remove data volumes. + type: bool + default: no + stopped: + description: + - Use with I(state) C(present) to stop all containers defined in the Compose file. + - If I(services) is defined, only the containers listed there will be stopped. + type: bool + default: no + restarted: + description: + - Use with I(state) C(present) to restart all containers defined in the Compose file. + - If I(services) is defined, only the containers listed there will be restarted. + type: bool + default: no + remove_orphans: + description: + - Remove containers for services not defined in the Compose file. + type: bool + default: no + timeout: + description: + - timeout in seconds for container shutdown when attached or when containers are already running. + type: int + default: 10 + +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "docker-compose >= 1.7.0" + - "Docker API >= 1.20" + - "PyYAML >= 3.11" +''' + +EXAMPLES = ''' +# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the +# flask directory + +- name: Run using a project directory + hosts: localhost + gather_facts: no + tasks: + - name: Tear down existing services + community.docker.docker_compose: + project_src: flask + state: absent + + - name: Create and start services + community.docker.docker_compose: + project_src: flask + register: output + + - ansible.builtin.debug: + var: output + + - name: Run `docker-compose up` again + community.docker.docker_compose: + project_src: flask + build: no + register: output + + - ansible.builtin.debug: + var: output + + - ansible.builtin.assert: + that: "not output.changed " + + - name: Stop all services + community.docker.docker_compose: + project_src: flask + build: no + stopped: yes + register: output + + - ansible.builtin.debug: + var: output + + - ansible.builtin.assert: + that: + - "not web.flask_web_1.state.running" + - "not db.flask_db_1.state.running" + + - name: Restart services + community.docker.docker_compose: + project_src: flask + build: no + restarted: yes + register: output + + - ansible.builtin.debug: + var: output + + - ansible.builtin.assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" + +- name: Scale the web service to 2 + hosts: localhost + gather_facts: no + tasks: + - community.docker.docker_compose: + project_src: flask + scale: + web: 2 + register: output + + - ansible.builtin.debug: + var: output + +- name: Run with inline v2 compose + hosts: localhost + gather_facts: no + tasks: + - community.docker.docker_compose: + project_src: flask + state: absent + + - community.docker.docker_compose: + project_name: flask + definition: + version: '2' + services: + db: + image: postgres + web: + build: "{{ playbook_dir }}/flask" + command: "python manage.py runserver 0.0.0.0:8000" + volumes: + - "{{ playbook_dir }}/flask:/code" + ports: + - "8000:8000" + depends_on: + - db + register: output + + - 
ansible.builtin.debug: + var: output + + - ansible.builtin.assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" + +- name: Run with inline v1 compose + hosts: localhost + gather_facts: no + tasks: + - community.docker.docker_compose: + project_src: flask + state: absent + + - community.docker.docker_compose: + project_name: flask + definition: + db: + image: postgres + web: + build: "{{ playbook_dir }}/flask" + command: "python manage.py runserver 0.0.0.0:8000" + volumes: + - "{{ playbook_dir }}/flask:/code" + ports: + - "8000:8000" + links: + - db + register: output + + - ansible.builtin.debug: + var: output + + - ansible.builtin.assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" +''' + +RETURN = ''' +services: + description: + - A dictionary mapping the service's name to a dictionary of containers. + returned: success + type: complex + contains: + container_name: + description: Name of the container. Format is C(project_service_#). + returned: success + type: complex + contains: + cmd: + description: One or more commands to be executed in the container. + returned: success + type: list + elements: str + example: ["postgres"] + image: + description: Name of the image from which the container was built. + returned: success + type: str + example: postgres + labels: + description: Meta data assigned to the container. + returned: success + type: dict + example: {...} + networks: + description: Contains a dictionary for each network to which the container is a member. + returned: success + type: list + elements: dict + contains: + IPAddress: + description: The IP address assigned to the container. + returned: success + type: str + example: 172.17.0.2 + IPPrefixLen: + description: Number of bits used by the subnet. + returned: success + type: int + example: 16 + aliases: + description: Aliases assigned to the container by the network. + returned: success + type: list + elements: str + example: ['db'] + globalIPv6: + description: IPv6 address assigned to the container. + returned: success + type: str + example: '' + globalIPv6PrefixLen: + description: IPv6 subnet length. + returned: success + type: int + example: 0 + links: + description: List of container names to which this container is linked. + returned: success + type: list + elements: str + example: null + macAddress: + description: Mac Address assigned to the virtual NIC. + returned: success + type: str + example: "02:42:ac:11:00:02" + state: + description: Information regarding the current disposition of the container. + returned: success + type: dict + contains: + running: + description: Whether or not the container is up with a running process. + returned: success + type: bool + example: true + status: + description: Description of the running state. + returned: success + type: str + example: running + +actions: + description: Provides the actions to be taken on each service as determined by compose. + returned: when in check mode or I(debug) is C(yes) + type: complex + contains: + service_name: + description: Name of the service. + returned: always + type: complex + contains: + pulled_image: + description: Provides image details when a new image is pulled for the service. + returned: on image pull + type: complex + contains: + name: + description: name of the image + returned: always + type: str + id: + description: image hash + returned: always + type: str + built_image: + description: Provides image details when a new image is built for the service. 
+ returned: on image build + type: complex + contains: + name: + description: name of the image + returned: always + type: str + id: + description: image hash + returned: always + type: str + + action: + description: A descriptive name of the action to be performed on the service's containers. + returned: always + type: list + elements: str + contains: + id: + description: the container's long ID + returned: always + type: str + name: + description: the container's name + returned: always + type: str + short_id: + description: the container's short ID + returned: always + type: str +''' + +import os +import re +import sys +import tempfile +import traceback +from contextlib import contextmanager +from distutils.version import LooseVersion + +try: + import yaml + HAS_YAML = True + HAS_YAML_EXC = None +except ImportError as dummy: + HAS_YAML = False + HAS_YAML_EXC = traceback.format_exc() + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +try: + from compose import __version__ as compose_version + from compose.cli.command import project_from_options + from compose.service import NoSuchImageError + from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt + from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF + HAS_COMPOSE = True + HAS_COMPOSE_EXC = None + MINIMUM_COMPOSE_VERSION = '1.7.0' +except ImportError as dummy: + HAS_COMPOSE = False + HAS_COMPOSE_EXC = traceback.format_exc() + DEFAULT_TIMEOUT = 10 + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + DockerBaseClass, + RequestException, +) + + +AUTH_PARAM_MAPPING = { + u'docker_host': u'--host', + u'tls': u'--tls', + u'cacert_path': u'--tlscacert', + u'cert_path': u'--tlscert', + u'key_path': u'--tlskey', + u'tls_verify': u'--tlsverify' +} + + +@contextmanager +def stdout_redirector(path_name): + old_stdout = sys.stdout + fd = open(path_name, 'w') + sys.stdout = fd + try: + yield + finally: + sys.stdout = old_stdout + + +@contextmanager +def stderr_redirector(path_name): + old_fh = sys.stderr + fd = open(path_name, 'w') + sys.stderr = fd + try: + yield + finally: + sys.stderr = old_fh + + +def make_redirection_tempfiles(): + dummy, out_redir_name = tempfile.mkstemp(prefix="ansible") + dummy, err_redir_name = tempfile.mkstemp(prefix="ansible") + return (out_redir_name, err_redir_name) + + +def cleanup_redirection_tempfiles(out_name, err_name): + for i in [out_name, err_name]: + os.remove(i) + + +def get_redirected_output(path_name): + output = [] + with open(path_name, 'r') as fd: + for line in fd: + # strip terminal format/color chars + new_line = re.sub(r'\x1b\[.+m', '', line) + output.append(new_line) + os.remove(path_name) + return output + + +def attempt_extract_errors(exc_str, stdout, stderr): + errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')] + errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')]) + + warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')] + warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')]) + + # assume either the exception body (if present) or the last warning was the 'most' + # fatal. 
+ + if exc_str.strip(): + msg = exc_str.strip() + elif errors: + msg = errors[-1].encode('utf-8') + else: + msg = 'unknown cause' + + return { + 'warnings': [w.encode('utf-8') for w in warnings], + 'errors': [e.encode('utf-8') for e in errors], + 'msg': msg, + 'module_stderr': ''.join(stderr), + 'module_stdout': ''.join(stdout) + } + + +def get_failure_info(exc, out_name, err_name=None, msg_format='%s'): + if err_name is None: + stderr = [] + else: + stderr = get_redirected_output(err_name) + stdout = get_redirected_output(out_name) + + reason = attempt_extract_errors(str(exc), stdout, stderr) + reason['msg'] = msg_format % reason['msg'] + return reason + + +class ContainerManager(DockerBaseClass): + + def __init__(self, client): + + super(ContainerManager, self).__init__() + + self.client = client + self.project_src = None + self.files = None + self.project_name = None + self.state = None + self.definition = None + self.hostname_check = None + self.timeout = None + self.remove_images = None + self.remove_orphans = None + self.remove_volumes = None + self.stopped = None + self.restarted = None + self.recreate = None + self.build = None + self.dependencies = None + self.services = None + self.scale = None + self.debug = None + self.pull = None + self.nocache = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + self.check_mode = client.check_mode + + if not self.debug: + self.debug = client.module._debug + + self.options = dict() + self.options.update(self._get_auth_options()) + self.options[u'--skip-hostname-check'] = (not self.hostname_check) + + if self.project_name: + self.options[u'--project-name'] = self.project_name + + if self.files: + self.options[u'--file'] = self.files + + if not HAS_COMPOSE: + self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" % + HAS_COMPOSE_EXC) + + if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION): + self.client.fail("Found docker-compose version %s. Minimum required version is %s. " + "Upgrade docker-compose to a min version of %s." % + (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION)) + + if self.restarted and self.stopped: + self.client.fail("Cannot use restarted and stopped at the same time.") + + self.log("options: ") + self.log(self.options, pretty_print=True) + + if self.definition: + if not HAS_YAML: + self.client.fail("Unable to load yaml. Try `pip install PyYAML`. 
Error: %s" % HAS_YAML_EXC) + + if not self.project_name: + self.client.fail("Parameter error - project_name required when providing definition.") + + self.project_src = tempfile.mkdtemp(prefix="ansible") + compose_file = os.path.join(self.project_src, "docker-compose.yml") + try: + self.log('writing: ') + self.log(yaml.dump(self.definition, default_flow_style=False)) + with open(compose_file, 'w') as f: + f.write(yaml.dump(self.definition, default_flow_style=False)) + except Exception as exc: + self.client.fail("Error writing to %s - %s" % (compose_file, str(exc))) + else: + if not self.project_src: + self.client.fail("Parameter error - project_src required.") + + try: + self.log("project_src: %s" % self.project_src) + self.project = project_from_options(self.project_src, self.options) + except Exception as exc: + self.client.fail("Configuration error - %s" % str(exc)) + + def exec_module(self): + result = dict() + + if self.state == 'present': + result = self.cmd_up() + elif self.state == 'absent': + result = self.cmd_down() + + if self.definition: + compose_file = os.path.join(self.project_src, "docker-compose.yml") + self.log("removing %s" % compose_file) + os.remove(compose_file) + self.log("removing %s" % self.project_src) + os.rmdir(self.project_src) + + if not self.check_mode and not self.debug and result.get('actions'): + result.pop('actions') + + return result + + def _get_auth_options(self): + options = dict() + for key, value in self.client.auth_params.items(): + if value is not None: + option = AUTH_PARAM_MAPPING.get(key) + if option: + options[option] = value + return options + + def cmd_up(self): + + start_deps = self.dependencies + service_names = self.services + detached = True + result = dict(changed=False, actions=[], services=dict()) + + up_options = { + u'--no-recreate': False, + u'--build': False, + u'--no-build': False, + u'--no-deps': False, + u'--force-recreate': False, + } + + if self.recreate == 'never': + up_options[u'--no-recreate'] = True + elif self.recreate == 'always': + up_options[u'--force-recreate'] = True + + if self.remove_orphans: + up_options[u'--remove-orphans'] = True + + converge = convergence_strategy_from_opts(up_options) + self.log("convergence strategy: %s" % converge) + + if self.pull: + pull_output = self.cmd_pull() + result['changed'] = pull_output['changed'] + result['actions'] += pull_output['actions'] + + if self.build: + build_output = self.cmd_build() + result['changed'] = build_output['changed'] + result['actions'] += build_output['actions'] + + if self.remove_orphans: + containers = self.client.containers( + filters={ + 'label': [ + '{0}={1}'.format(LABEL_PROJECT, self.project.name), + '{0}={1}'.format(LABEL_ONE_OFF, "False") + ], + } + ) + + orphans = [] + for container in containers: + service_name = container.get('Labels', {}).get(LABEL_SERVICE) + if service_name not in self.project.service_names: + orphans.append(service_name) + + if orphans: + result['changed'] = True + + for service in self.project.services: + if not service_names or service.name in service_names: + plan = service.convergence_plan(strategy=converge) + if plan.action != 'noop': + result['changed'] = True + result_action = dict(service=service.name) + result_action[plan.action] = [] + for container in plan.containers: + result_action[plan.action].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id, + )) + result['actions'].append(result_action) + + if not self.check_mode and result['changed'] and not self.stopped: + out_redir_name, 
err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + do_build = build_action_from_opts(up_options) + self.log('Setting do_build to %s' % do_build) + self.project.up( + service_names=service_names, + start_deps=start_deps, + strategy=converge, + do_build=do_build, + detached=detached, + remove_orphans=self.remove_orphans, + timeout=self.timeout) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error starting project %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + + if self.stopped: + stop_output = self.cmd_stop(service_names) + result['changed'] = stop_output['changed'] + result['actions'] += stop_output['actions'] + + if self.restarted: + restart_output = self.cmd_restart(service_names) + result['changed'] = restart_output['changed'] + result['actions'] += restart_output['actions'] + + if self.scale: + scale_output = self.cmd_scale() + result['changed'] = scale_output['changed'] + result['actions'] += scale_output['actions'] + + for service in self.project.services: + service_facts = dict() + result['services'][service.name] = service_facts + for container in service.containers(stopped=True): + inspection = container.inspect() + # pare down the inspection data to the most useful bits + facts = dict( + cmd=[], + labels=dict(), + image=None, + state=dict( + running=None, + status=None + ), + networks=dict() + ) + if inspection['Config'].get('Cmd', None) is not None: + facts['cmd'] = inspection['Config']['Cmd'] + if inspection['Config'].get('Labels', None) is not None: + facts['labels'] = inspection['Config']['Labels'] + if inspection['Config'].get('Image', None) is not None: + facts['image'] = inspection['Config']['Image'] + if inspection['State'].get('Running', None) is not None: + facts['state']['running'] = inspection['State']['Running'] + if inspection['State'].get('Status', None) is not None: + facts['state']['status'] = inspection['State']['Status'] + + if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'): + networks = inspection['NetworkSettings']['Networks'] + for key in networks: + facts['networks'][key] = dict( + aliases=[], + globalIPv6=None, + globalIPv6PrefixLen=0, + IPAddress=None, + IPPrefixLen=0, + links=None, + macAddress=None, + ) + if networks[key].get('Aliases', None) is not None: + facts['networks'][key]['aliases'] = networks[key]['Aliases'] + if networks[key].get('GlobalIPv6Address', None) is not None: + facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address'] + if networks[key].get('GlobalIPv6PrefixLen', None) is not None: + facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen'] + if networks[key].get('IPAddress', None) is not None: + facts['networks'][key]['IPAddress'] = networks[key]['IPAddress'] + if networks[key].get('IPPrefixLen', None) is not None: + facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen'] + if networks[key].get('Links', None) is not None: + facts['networks'][key]['links'] = networks[key]['Links'] + if networks[key].get('MacAddress', None) is not None: + facts['networks'][key]['macAddress'] = networks[key]['MacAddress'] + + service_facts[container.name] = facts + + return result + + def cmd_pull(self): + result = dict( + changed=False, + actions=[], + ) + + if not self.check_mode: + for service in self.project.get_services(self.services, 
include_deps=False): + if 'image' not in service.options: + continue + + self.log('Pulling image for service %s' % service.name) + # store the existing image ID + old_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + old_image_id = image['Id'] + except NoSuchImageError: + pass + except Exception as exc: + self.client.fail("Error: service image lookup failed - %s" % str(exc)) + + out_redir_name, err_redir_name = make_redirection_tempfiles() + # pull the image + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + service.pull(ignore_pull_failures=False) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error: pull failed with %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + + # store the new image ID + new_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + new_image_id = image['Id'] + except NoSuchImageError as exc: + self.client.fail("Error: service image lookup failed after pull - %s" % str(exc)) + + if new_image_id != old_image_id: + # if a new image was pulled + result['changed'] = True + result['actions'].append(dict( + service=service.name, + pulled_image=dict( + name=service.image_name, + id=new_image_id + ) + )) + return result + + def cmd_build(self): + result = dict( + changed=False, + actions=[] + ) + if not self.check_mode: + for service in self.project.get_services(self.services, include_deps=False): + if service.can_be_built(): + self.log('Building image for service %s' % service.name) + # store the existing image ID + old_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + old_image_id = image['Id'] + except NoSuchImageError: + pass + except Exception as exc: + self.client.fail("Error: service image lookup failed - %s" % str(exc)) + + out_redir_name, err_redir_name = make_redirection_tempfiles() + # build the image + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + new_image_id = service.build(pull=self.pull, no_cache=self.nocache) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error: build failed with %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + + if new_image_id not in old_image_id: + # if a new image was built + result['changed'] = True + result['actions'].append(dict( + service=service.name, + built_image=dict( + name=service.image_name, + id=new_image_id + ) + )) + return result + + def cmd_down(self): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + containers = service.containers(stopped=True) + if len(containers): + result['changed'] = True + result['actions'].append(dict( + service=service.name, + deleted=[container.name for container in containers] + )) + if not self.check_mode and result['changed']: + image_type = image_type_from_opt('--rmi', self.remove_images) + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + self.project.down(image_type, self.remove_volumes, self.remove_orphans) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error stopping project - %s") + self.client.fail(**fail_reason) + else: + 
cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + return result + + def cmd_stop(self, service_names): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + if not service_names or service.name in service_names: + service_res = dict( + service=service.name, + stop=[] + ) + for container in service.containers(stopped=False): + result['changed'] = True + service_res['stop'].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id + )) + result['actions'].append(service_res) + if not self.check_mode and result['changed']: + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + self.project.stop(service_names=service_names, timeout=self.timeout) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error stopping project %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + return result + + def cmd_restart(self, service_names): + result = dict( + changed=False, + actions=[] + ) + + for service in self.project.services: + if not service_names or service.name in service_names: + service_res = dict( + service=service.name, + restart=[] + ) + for container in service.containers(stopped=True): + result['changed'] = True + service_res['restart'].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id + )) + result['actions'].append(service_res) + + if not self.check_mode and result['changed']: + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + self.project.restart(service_names=service_names, timeout=self.timeout) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error restarting project %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + return result + + def cmd_scale(self): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + if service.name in self.scale: + service_res = dict( + service=service.name, + scale=0 + ) + containers = service.containers(stopped=True) + scale = self.parse_scale(service.name) + if len(containers) != scale: + result['changed'] = True + service_res['scale'] = scale - len(containers) + if not self.check_mode: + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + service.scale(scale) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error scaling {0} - %s".format(service.name)) + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + result['actions'].append(service_res) + return result + + def parse_scale(self, service_name): + try: + return int(self.scale[service_name]) + except ValueError: + self.client.fail("Error scaling %s - expected int, got %s", + service_name, str(type(self.scale[service_name]))) + + +def main(): + argument_spec = dict( + project_src=dict(type='path'), + project_name=dict(type='str',), + files=dict(type='list', elements='path'), + state=dict(type='str', default='present', choices=['absent', 'present']), + definition=dict(type='dict'), + 
hostname_check=dict(type='bool', default=False), + recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']), + build=dict(type='bool', default=False), + remove_images=dict(type='str', choices=['all', 'local']), + remove_volumes=dict(type='bool', default=False), + remove_orphans=dict(type='bool', default=False), + stopped=dict(type='bool', default=False), + restarted=dict(type='bool', default=False), + scale=dict(type='dict'), + services=dict(type='list', elements='str'), + dependencies=dict(type='bool', default=True), + pull=dict(type='bool', default=False), + nocache=dict(type='bool', default=False), + debug=dict(type='bool', default=False), + timeout=dict(type='int', default=DEFAULT_TIMEOUT) + ) + + mutually_exclusive = [ + ('definition', 'project_src'), + ('definition', 'files') + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + min_docker_api_version='1.20', + ) + + try: + result = ContainerManager(client).exec_module() + client.module.exit_json(**result) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py new file mode 100644 index 00000000..3791dda4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_config + +short_description: Manage docker configs. + + +description: + - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm). + - Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used + in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated + unless the I(force) option is set. + - Updates to configs are performed by removing the config and creating it again. +options: + data: + description: + - The value of the config. Required when state is C(present). + type: str + data_is_b64: + description: + - If set to C(true), the data is assumed to be Base64 encoded and will be + decoded before being used. + - To use binary I(data), it is better to keep it Base64 encoded and let it + be decoded by this option. + type: bool + default: no + labels: + description: + - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string." + - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again. + type: dict + force: + description: + - Use with state C(present) to always remove and recreate an existing config. + - If C(true), an existing config will be replaced, even if it has not been changed. 
+ type: bool + default: no + name: + description: + - The name of the config. + type: str + required: yes + state: + description: + - Set to C(present), if the config should exist, and C(absent), if it should not. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_2_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0" + - "Docker API >= 1.30" + +author: + - Chris Houseknecht (@chouseknecht) + - John Hu (@ushuz) +''' + +EXAMPLES = ''' + +- name: Create config foo (from a file on the control machine) + community.docker.docker_config: + name: foo + # If the file is JSON or binary, Ansible might modify it (because + # it is first decoded and later re-encoded). Base64-encoding the + # file directly after reading it prevents this to happen. + data: "{{ lookup('file', '/path/to/config/file') | b64encode }}" + data_is_b64: true + state: present + +- name: Change the config data + community.docker.docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + state: present + +- name: Add a new label + community.docker.docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + # Adding a new label will cause a remove/create of the config + two: '2' + state: present + +- name: No change + community.docker.docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + # Even though 'two' is missing, there is no change to the existing config + state: present + +- name: Update an existing label + community.docker.docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: monkey # Changing a label will cause a remove/create of the config + one: '1' + state: present + +- name: Force the (re-)creation of the config + community.docker.docker_config: + name: foo + data: Goodnight everyone! + force: yes + state: present + +- name: Remove config foo + community.docker.docker_config: + name: foo + state: absent +''' + +RETURN = ''' +config_id: + description: + - The ID assigned by Docker to the config object. 
+ returned: success and I(state) is C(present) + type: str + sample: 'hzehrmyjigmcp2gb6nlhmjqcv' +''' + +import base64 +import hashlib +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + DockerBaseClass, + compare_generic, + RequestException, +) +from ansible.module_utils._text import to_native, to_bytes + + +class ConfigManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ConfigManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + + parameters = self.client.module.params + self.name = parameters.get('name') + self.state = parameters.get('state') + self.data = parameters.get('data') + if self.data is not None: + if parameters.get('data_is_b64'): + self.data = base64.b64decode(self.data) + else: + self.data = to_bytes(self.data) + self.labels = parameters.get('labels') + self.force = parameters.get('force') + self.data_key = None + + def __call__(self): + if self.state == 'present': + self.data_key = hashlib.sha224(self.data).hexdigest() + self.present() + elif self.state == 'absent': + self.absent() + + def get_config(self): + ''' Find an existing config. ''' + try: + configs = self.client.configs(filters={'name': self.name}) + except APIError as exc: + self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc))) + + for config in configs: + if config['Spec']['Name'] == self.name: + return config + return None + + def create_config(self): + ''' Create a new config ''' + config_id = None + # We can't see the data after creation, so adding a label we can use for idempotency check + labels = { + 'ansible_key': self.data_key + } + if self.labels: + labels.update(self.labels) + + try: + if not self.check_mode: + config_id = self.client.create_config(self.name, self.data, labels=labels) + except APIError as exc: + self.client.fail("Error creating config: %s" % to_native(exc)) + + if isinstance(config_id, dict): + config_id = config_id['ID'] + + return config_id + + def present(self): + ''' Handles state == 'present', creating or updating the config ''' + config = self.get_config() + if config: + self.results['config_id'] = config['ID'] + data_changed = False + attrs = config.get('Spec', {}) + if attrs.get('Labels', {}).get('ansible_key'): + if attrs['Labels']['ansible_key'] != self.data_key: + data_changed = True + labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict') + if data_changed or labels_changed or self.force: + # if something changed or force, delete and re-create the config + self.absent() + config_id = self.create_config() + self.results['changed'] = True + self.results['config_id'] = config_id + else: + self.results['changed'] = True + self.results['config_id'] = self.create_config() + + def absent(self): + ''' Handles state == 'absent', removing the config ''' + config = self.get_config() + if config: + try: + if not self.check_mode: + self.client.remove_config(config['ID']) + except APIError as exc: + self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc))) + self.results['changed'] = True + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + data=dict(type='str'), 
+ data_is_b64=dict(type='bool', default=False), + labels=dict(type='dict'), + force=dict(type='bool', default=False) + ) + + required_if = [ + ('state', 'present', ['data']) + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_version='2.6.0', + min_docker_api_version='1.30', + ) + + try: + results = dict( + changed=False, + ) + + ConfigManager(client, results)() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py new file mode 100644 index 00000000..033b5c72 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py @@ -0,0 +1,3591 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_container + +short_description: manage docker containers + +description: + - Manage the life cycle of docker containers. + - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken. + + +notes: + - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and + a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to + prevent this. + - If the module needs to recreate the container, it will only use the options provided to the module to create the + new container (except I(image)). Therefore, always specify *all* options relevant to the container. + - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected. + Please note that several options have default values; if the container to be restarted uses different values for + these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove), + I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior + can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from + community.docker 2.0.0 on. + +options: + auto_remove: + description: + - Enable auto-removal of the container on daemon side when the container's process exits. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + blkio_weight: + description: + - Block IO (relative weight), between 10 and 1000. + type: int + capabilities: + description: + - List of capabilities to add to the container. + type: list + elements: str + cap_drop: + description: + - List of capabilities to drop from the container. + type: list + elements: str + cgroup_parent: + description: + - Specify the parent cgroup for the container. 
+ type: str + version_added: 1.1.0 + cleanup: + description: + - Use with I(detach=false) to remove the container after successful execution. + type: bool + default: no + command: + description: + - Command to execute when the container starts. A command may be either a string or a list. + - Prior to version 2.4, strings were split on commas. + type: raw + comparisons: + description: + - Allows to specify how properties of existing containers are compared with + module options to decide whether the container should be recreated / updated + or not. + - Only options which correspond to the state of a container as handled by the + Docker daemon can be specified, as well as C(networks). + - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore) + and C(allow_more_present). + - If C(strict) is specified, values are tested for equality, and changes always + result in updating or restarting. If C(ignore) is specified, changes are ignored. + - C(allow_more_present) is allowed only for lists, sets and dicts. If it is + specified for lists or sets, the container will only be updated or restarted if + the module option contains a value which is not present in the container's + options. If the option is specified for a dict, the container will only be updated + or restarted if the module option contains a key which isn't present in the + container's option, or if the value of a key present differs. + - The wildcard option C(*) can be used to set one of the default values C(strict) + or C(ignore) to *all* comparisons which are not explicitly set to other values. + - See the examples for details. + type: dict + container_default_behavior: + description: + - Various module options used to have default values. This causes problems with + containers which use different values for these options. + - The default value is C(compatibility), which will ensure that the default values + are used when the values are not explicitly specified by the user. + - From community.docker 2.0.0 on, the default value will switch to C(no_defaults). To avoid + deprecation warnings, please set I(container_default_behavior) to an explicit + value. + - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory), + I(paused), I(privileged), I(read_only) and I(tty) options. + type: str + choices: + - compatibility + - no_defaults + cpu_period: + description: + - Limit CPU CFS (Completely Fair Scheduler) period. + - See I(cpus) for an easier to use alternative. + type: int + cpu_quota: + description: + - Limit CPU CFS (Completely Fair Scheduler) quota. + - See I(cpus) for an easier to use alternative. + type: int + cpus: + description: + - Specify how much of the available CPU resources a container can use. + - A value of C(1.5) means that at most one and a half CPU (core) will be used. + type: float + cpuset_cpus: + description: + - CPUs in which to allow execution C(1,3) or C(1-3). + type: str + cpuset_mems: + description: + - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1). + type: str + cpu_shares: + description: + - CPU shares (relative weight). + type: int + default_host_ip: + description: + - Define the default host IP to use. + - Must be an empty string, an IPv4 address, or an IPv6 address. + - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid the + port bindings without an explicit IP address to only bind to IPv4. + See U(https://github.com/ansible-collections/community.docker/issues/70) for details. 
+ - By default, the module will try to auto-detect this value from the C(bridge) network's + C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it + will fall back to C(0.0.0.0). + type: str + version_added: 1.2.0 + detach: + description: + - Enable detached mode to leave the container running in background. + - If disabled, the task will reflect the status of the container run (failed if the command failed). + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(yes). + type: bool + devices: + description: + - List of host device bindings to add to the container. + - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)." + type: list + elements: str + device_read_bps: + description: + - "List of device path and read rate (bytes per second) from device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit in format C(<number>[<unit>])." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." + type: str + required: yes + device_write_bps: + description: + - "List of device and write rate (bytes per second) to device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit in format C(<number>[<unit>])." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." + type: str + required: yes + device_read_iops: + description: + - "List of device and read rate (IO per second) from device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit." + - "Must be a positive integer." + type: int + required: yes + device_write_iops: + description: + - "List of device and write rate (IO per second) to device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit." + - "Must be a positive integer." + type: int + required: yes + device_requests: + description: + - Allows to request additional resources, such as GPUs. + type: list + elements: dict + suboptions: + capabilities: + description: + - List of lists of strings to request capabilities. + - The top-level list entries are combined by OR, and for every list entry, + the entries in the list it contains are combined by AND. + - The driver tries to satisfy one of the sub-lists. + - Available capabilities for the C(nvidia) driver can be found at + U(https://github.com/NVIDIA/nvidia-container-runtime). + type: list + elements: list + count: + description: + - Number or devices to request. + - Set to C(-1) to request all available devices. + type: int + device_ids: + description: + - List of device IDs. + type: list + elements: str + driver: + description: + - Which driver to use for this device. + type: str + options: + description: + - Driver-specific options. 
+  dns_opts:
+    description:
+      - List of DNS options.
+    type: list
+    elements: str
+  dns_servers:
+    description:
+      - List of custom DNS servers.
+    type: list
+    elements: str
+  dns_search_domains:
+    description:
+      - List of custom DNS search domains.
+    type: list
+    elements: str
+  domainname:
+    description:
+      - Container domainname.
+    type: str
+  env:
+    description:
+      - Dictionary of key-value pairs.
+      - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
+    type: dict
+  env_file:
+    description:
+      - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+      - If a variable is also present in I(env), the I(env) value will override it.
+    type: path
+  entrypoint:
+    description:
+      - Command that overwrites the default C(ENTRYPOINT) of the image.
+    type: list
+    elements: str
+  etc_hosts:
+    description:
+      - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+        Each host name will be added to the container's C(/etc/hosts) file.
+    type: dict
+  exposed_ports:
+    description:
+      - List of additional container ports which informs Docker that the container
+        listens on the specified network ports at runtime.
+      - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+        need to be exposed again.
+    type: list
+    elements: str
+    aliases:
+      - exposed
+      - expose
+  force_kill:
+    description:
+      - Use the kill command when stopping a running container.
+    type: bool
+    default: no
+    aliases:
+      - forcekill
+  groups:
+    description:
+      - List of additional group names and/or IDs that the container process will run as.
+    type: list
+    elements: str
+  healthcheck:
+    description:
+      - Configure a check that is run to determine whether or not containers for this service are "healthy".
+      - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+        for details on how healthchecks work."
+      - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept a duration as a string in a format
+        that looks like C(5h34m56s) or C(1m30s). The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+    type: dict
+    suboptions:
+      test:
+        description:
+          - Command to run to check health.
+          - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+        type: raw
+      interval:
+        description:
+          - Time between running the check.
+          - The default used by the Docker daemon is C(30s).
+        type: str
+      timeout:
+        description:
+          - Maximum time to allow one check to run.
+          - The default used by the Docker daemon is C(30s).
+        type: str
+      retries:
+        description:
+          - Consecutive number of failures needed to report unhealthy.
+          - The default used by the Docker daemon is C(3).
+        type: int
+      start_period:
+        description:
+          - Start period for the container to initialize before starting health-retries countdown.
+          - The default used by the Docker daemon is C(0s).
+        type: str
+  hostname:
+    description:
+      - The container's hostname.
+    type: str
+  ignore_image:
+    description:
+      - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+        container to the requested configuration. The evaluation includes the image version. If the image
+        version in the registry does not match the container, the container will be recreated. You can
+        stop this behavior by setting I(ignore_image) to C(True).
+      - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+        I(comparisons) option."
+    type: bool
+    default: no
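+  # A minimal sketch of I(ignore_image) (illustrative only): keep an existing container
+  # running even if the image referenced by I(image) has changed in the registry.
+  #
+  #   community.docker.docker_container:
+  #     name: web
+  #     image: nginx
+  #     ignore_image: yes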
+ - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the + I(comparisons) option." + type: bool + default: no + image: + description: + - Repository path and tag used to create the container. If an image is not found or pull is true, the image + will be pulled from the registry. If no tag is included, C(latest) will be used. + - Can also be an image ID. If this is the case, the image is assumed to be available locally. + The I(pull) option is ignored for this case. + type: str + init: + description: + - Run an init inside the container that forwards signals and reaps processes. + - This option requires Docker API >= 1.25. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + interactive: + description: + - Keep stdin open after a container is launched, even if not attached. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + ipc_mode: + description: + - Set the IPC mode for the container. + - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use + the host's IPC namespace within the container. + type: str + keep_volumes: + description: + - Retain anonymous volumes associated with a removed container. + type: bool + default: yes + kill_signal: + description: + - Override default signal used to kill a running container. + type: str + kernel_memory: + description: + - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)." + - Omitting the unit defaults to bytes. + type: str + labels: + description: + - Dictionary of key value pairs. + type: dict + links: + description: + - List of name aliases for linked containers in the format C(container_name:alias). + - Setting this will force container to be restarted. + type: list + elements: str + log_driver: + description: + - Specify the logging driver. Docker uses C(json-file) by default. + - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices. + type: str + log_options: + description: + - Dictionary of options specific to the chosen I(log_driver). + - See U(https://docs.docker.com/engine/admin/logging/overview/) for details. + type: dict + aliases: + - log_opt + mac_address: + description: + - Container MAC address (e.g. 92:d0:c6:0a:29:33). + type: str + memory: + description: + - "Memory limit in format C(<number>[<unit>]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - Omitting the unit defaults to bytes. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C("0"). + type: str + memory_reservation: + description: + - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - Omitting the unit defaults to bytes. + type: str + memory_swap: + description: + - "Total memory limit (memory + swap) in format C(<number>[<unit>]). + Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), + C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." 
+  memory_swappiness:
+    description:
+      - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      - If not set, the value will remain the same if the container exists, and will be inherited
+        from the host machine if it is (re-)created.
+    type: int
+  mounts:
+    type: list
+    elements: dict
+    description:
+      - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+    suboptions:
+      target:
+        description:
+          - Path inside the container.
+        type: str
+        required: true
+      source:
+        description:
+          - Mount source (e.g. a volume name or a host path).
+        type: str
+      type:
+        description:
+          - The mount type.
+          - Note that C(npipe) is only supported by Docker for Windows.
+        type: str
+        choices:
+          - bind
+          - npipe
+          - tmpfs
+          - volume
+        default: volume
+      read_only:
+        description:
+          - Whether the mount should be read-only.
+        type: bool
+      consistency:
+        description:
+          - The consistency requirement for the mount.
+        type: str
+        choices:
+          - cached
+          - consistent
+          - default
+          - delegated
+      propagation:
+        description:
+          - Propagation mode. Only valid for the C(bind) type.
+        type: str
+        choices:
+          - private
+          - rprivate
+          - shared
+          - rshared
+          - slave
+          - rslave
+      no_copy:
+        description:
+          - Set to C(false) if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+          - The default value is C(false).
+        type: bool
+      labels:
+        description:
+          - User-defined name and labels for the volume. Only valid for the C(volume) type.
+        type: dict
+      volume_driver:
+        description:
+          - Specify the volume driver. Only valid for the C(volume) type.
+          - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+        type: str
+      volume_options:
+        description:
+          - Dictionary of options specific to the chosen volume_driver. See
+            L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+        type: dict
+      tmpfs_size:
+        description:
+          - "The size for the tmpfs mount in bytes in format C(<number>[<unit>])."
+          - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+            C(T) (tebibyte), or C(P) (pebibyte)."
+          - "Omitting the unit defaults to bytes."
+        type: str
+      tmpfs_mode:
+        description:
+          - The permission mode for the tmpfs mount.
+        type: str
+  name:
+    description:
+      - Assign a name to a new container or match an existing container.
+      - When identifying an existing container, I(name) may be a name or a long or short container ID.
+    type: str
+    required: yes
+  network_mode:
+    description:
+      - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+      - "*Note* that from community.docker 2.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+        the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+        by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+        I(network_mode) is not specified."
+    type: str
+  userns_mode:
+    description:
+      - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+    type: str
+  networks:
+    description:
+      - List of networks the container belongs to.
+      - For examples of the data structure and usage see EXAMPLES below.
+      - To remove a container from one or more networks, use the I(purge_networks) option.
+      - If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified.
+        This is different from the behavior of C(docker run ...). You need to explicitly use I(purge_networks) to enforce
+        the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case.
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description:
+          - The network's name.
+        type: str
+        required: yes
+      ipv4_address:
+        description:
+          - The container's IPv4 address in this network.
+        type: str
+      ipv6_address:
+        description:
+          - The container's IPv6 address in this network.
+        type: str
+      links:
+        description:
+          - A list of containers to link to.
+        type: list
+        elements: str
+      aliases:
+        description:
+          - List of aliases for this container in this network. These names
+            can be used in the network to reach this container.
+        type: list
+        elements: str
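+  # A minimal sketch of the I(networks) data structure (illustrative only; the network
+  # name is hypothetical):
+  #
+  #   networks:
+  #     - name: appnet
+  #       ipv4_address: "172.20.0.10"
+  #       aliases:
+  #         - web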
+  networks_cli_compatible:
+    description:
+      - "If I(networks_cli_compatible) is set to C(yes) (default), this module will behave as
+        C(docker run --network) and will *not* add the default network if I(networks) is
+        specified. If I(networks) is not specified, the default network will be attached."
+      - "When I(networks_cli_compatible) is set to C(no) and networks are provided to the module
+        via the I(networks) option, the module behaves differently than C(docker run --network):
+        C(docker run --network other) will create a container with network C(other) attached,
+        but the default network not attached. This module with I(networks: {name: other}) will
+        create a container with both C(default) and C(other) attached. If I(purge_networks) is
+        set to C(yes), the C(default) network will be removed afterwards."
+      - "*Note* that the docker CLI also sets I(network_mode) to the name of the first network
+        added if C(--network) is specified. For more compatibility with the docker CLI, you
+        explicitly have to set I(network_mode) to the name of the first network you're
+        adding. This behavior will change for community.docker 2.0.0: then I(network_mode) will
+        automatically be set to the first network name in I(networks) if I(network_mode)
+        is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+        is C(true)."
+    type: bool
+    default: true
+  oom_killer:
+    description:
+      - Whether or not to disable OOM Killer for the container.
+    type: bool
+  oom_score_adj:
+    description:
+      - An integer value containing the score given to the container in order to tune
+        OOM killer preferences.
+    type: int
+  output_logs:
+    description:
+      - If set to C(true), output of the container command will be printed.
+      - Only effective when I(log_driver) is set to C(json-file) or C(journald).
+    type: bool
+    default: no
+  paused:
+    description:
+      - Use with the started state to pause running processes inside the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+        option has a default of C(no).
+    type: bool
+  pid_mode:
+    description:
+      - Set the PID namespace mode for the container.
+      - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
+        Docker SDK for Python (docker) allow all values supported by the Docker daemon.
+    type: str
+  pids_limit:
+    description:
+      - Set PIDs limit for the container. It accepts an integer value.
+      - Set C(-1) for unlimited PIDs.
+    type: int
+  privileged:
+    description:
+      - Give extended privileges to the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+        option has a default of C(no).
+    type: bool
+  published_ports:
+    description:
+      - List of ports to publish from the container to the host.
+      - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+        container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+      - Port ranges can be used for source and destination ports. If two ranges with
+        different lengths are specified, the shorter range will be used.
+        Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+        to the first port of the destination range, but to a free port in that range. This is the
+        same behavior as for the C(docker) command line utility.
+      - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
+        is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
+        to resolve hostnames."
+      - A value of C(all) will publish all exposed container ports to random host ports, ignoring
+        any other mappings.
+      - If the I(networks) parameter is provided, the module will inspect each network to see if there exists
+        a bridge network with the optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+        If such a network is found, then published ports where no host IP address is specified
+        will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+        Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+        value encountered in the list of I(networks) is the one that will be used.
+    type: list
+    elements: str
+    aliases:
+      - ports
+  pull:
+    description:
+      - If true, always pull the latest version of an image. Otherwise, will only pull an image
+        when missing.
+      - "*Note:* images are only pulled when specified by name. If the image is specified
+        as an image ID (hash), it cannot be pulled."
+    type: bool
+    default: no
+  purge_networks:
+    description:
+      - Remove the container from ALL networks not included in the I(networks) parameter.
+      - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+    type: bool
+    default: no
+  read_only:
+    description:
+      - Mount the container's root file system as read-only.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+        option has a default of C(no).
+    type: bool
+  recreate:
+    description:
+      - Use with present and started states to force the re-creation of an existing container.
+    type: bool
+    default: no
+  removal_wait_timeout:
+    description:
+      - When removing an existing container, the docker daemon API call returns once the container
+        is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O
+        load, removal can take longer. By default, the module will wait until the container has been
+        removed before trying to (re-)create it, however long this takes.
+      - By setting this option, the module will wait at most this many seconds for the container to be
+        removed. If the container is still in the removal phase after this many seconds, the module will
+        fail.
+    type: float
+  restart:
+    description:
+      - Use with started state to force a matching container to be stopped and restarted.
+    type: bool
+    default: no
+  restart_policy:
+    description:
+      - Container restart policy.
+      - Place quotes around the C(no) option.
+    type: str
+    choices:
+      - 'no'
+      - 'on-failure'
+      - 'always'
+      - 'unless-stopped'
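+  # A minimal sketch of I(restart_policy) (illustrative only). Unquoted, YAML would
+  # parse C(no) as a boolean, hence the quotes:
+  #
+  #   restart_policy: 'no'
+  #   # or, to retry a failing container up to five times:
+  #   restart_policy: on-failure
+  #   restart_retries: 5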
+  restart_retries:
+    description:
+      - Use with restart policy to control maximum number of restart attempts.
+    type: int
+  runtime:
+    description:
+      - Runtime to use for the container.
+    type: str
+  shm_size:
+    description:
+      - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is a positive integer.
+        Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+        C(T) (tebibyte), or C(P) (pebibyte)."
+      - Omitting the unit defaults to bytes. If you omit the size entirely, the Docker daemon uses C(64M).
+    type: str
+  security_opts:
+    description:
+      - List of security options in the form of C("label:user:User").
+    type: list
+    elements: str
+  state:
+    description:
+      - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+        rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+      - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+        container matches the name, a container will be created. If a container matches the name but the provided configuration
+        does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+        with the requested config.'
+      - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+        state. Use I(restart) to force a matching container to be stopped and restarted.'
+      - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+        state.'
+      - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+        image version will be taken into account, you can also use the I(ignore_image) option.
+      - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+      - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+        C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+    type: str
+    default: started
+    choices:
+      - absent
+      - present
+      - stopped
+      - started
+  stop_signal:
+    description:
+      - Override default signal used to stop the container.
+    type: str
+  stop_timeout:
+    description:
+      - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+        When the container is created by this module, its C(StopTimeout) configuration
+        will be set to this value.
+      - When the container is stopped, this value will be used as a timeout for stopping the
+        container. In case the container has a custom C(StopTimeout) configuration,
+        the behavior depends on the version of the docker daemon. New versions of
+        the docker daemon will always use the container's configured C(StopTimeout)
+        value if it has been configured.
+    type: int
+  tmpfs:
+    description:
+      - Mount a tmpfs directory.
+    type: list
+    elements: str
+  tty:
+    description:
+      - Allocate a pseudo-TTY.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+        option has a default of C(no).
+    type: bool
+  ulimits:
+    description:
+      - "List of ulimit options.
A ulimit is specified as C(nofile:262144:262144)." + type: list + elements: str + sysctls: + description: + - Dictionary of key,value pairs. + type: dict + user: + description: + - Sets the username or UID used and optionally the groupname or GID for the specified command. + - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)." + type: str + uts: + description: + - Set the UTS namespace mode for the container. + type: str + volumes: + description: + - List of volumes to mount within the container. + - "Use docker CLI-style syntax: C(/host:/container[:mode])" + - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent), + C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and + C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes." + - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume. + - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw), + C(z), and C(Z)." + type: list + elements: str + volume_driver: + description: + - The container volume driver. + type: str + volumes_from: + description: + - List of container names or IDs to get volumes from. + type: list + elements: str + working_dir: + description: + - Path to the working directory. + type: str +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" + - "Daan Oosterveld (@dusdanig)" + - "Chris Houseknecht (@chouseknecht)" + - "Kassian Sun (@kassiansun)" + - "Felix Fontein (@felixfontein)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.20" +''' + +EXAMPLES = ''' +- name: Create a data container + community.docker.docker_container: + name: mydata + image: busybox + volumes: + - /data + +- name: Re-create a redis container + community.docker.docker_container: + name: myredis + image: redis + command: redis-server --appendonly yes + state: present + recreate: yes + exposed_ports: + - 6379 + volumes_from: + - mydata + +- name: Restart a container + community.docker.docker_container: + name: myapplication + image: someuser/appimage + state: started + restart: yes + links: + - "myredis:aliasedredis" + devices: + - "/dev/sda:/dev/xvda:rwm" + ports: + # Publish container port 9000 as host port 8080 + - "8080:9000" + # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1 + - "127.0.0.1:8081:9001/udp" + # Publish container port 9002 as a random host port + - "9002" + # Publish container port 9003 as a free host port in range 8000-8100 + # (the host port will be selected by the Docker daemon) + - "8000-8100:9003" + # Publish container ports 9010-9020 to host ports 7000-7010 + - "7000-7010:9010-9020" + env: + SECRET_KEY: "ssssh" + # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted + BOOLEAN_KEY: "yes" + +- name: Container present + community.docker.docker_container: + name: mycontainer + state: present + image: ubuntu:14.04 + command: sleep infinity + +- name: Stop a container + community.docker.docker_container: + name: mycontainer + 
    state: stopped
+
+- name: Start 4 load-balanced containers
+  community.docker.docker_container:
+    name: "container{{ item }}"
+    recreate: yes
+    image: someuser/anotherappimage
+    command: sleep 1d
+  with_sequence: count=4
+
+- name: Remove container
+  community.docker.docker_container:
+    name: ohno
+    state: absent
+
+- name: Syslogging output
+  community.docker.docker_container:
+    name: myservice
+    image: busybox
+    log_driver: syslog
+    log_options:
+      syslog-address: tcp://my-syslog-server:514
+      syslog-facility: daemon
+      # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag".
+      # For older docker installs, use "syslog-tag" instead.
+      tag: myservice
+
+- name: Create db container and connect to network
+  community.docker.docker_container:
+    name: db_test
+    image: "postgres:latest"
+    networks:
+      - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+  community.docker.docker_container:
+    name: sleeper
+    image: ubuntu:14.04
+    networks:
+      - name: TestingNet
+        ipv4_address: "172.1.1.100"
+        aliases:
+          - sleepyzz
+        links:
+          - db_test:db
+      - name: TestingNet2
+
+- name: Start a container with a command
+  community.docker.docker_container:
+    name: sleepy
+    image: ubuntu:14.04
+    command: ["sleep", "infinity"]
+
+- name: Add container to networks
+  community.docker.docker_container:
+    name: sleepy
+    networks:
+      - name: TestingNet
+        ipv4_address: 172.1.1.18
+        links:
+          - sleeper
+      - name: TestingNet2
+        ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+  community.docker.docker_container:
+    name: sleepy
+    networks:
+      - name: TestingNet
+        aliases:
+          - sleepyz
+          - zzzz
+
+- name: Remove container from one network
+  community.docker.docker_container:
+    name: sleepy
+    networks:
+      - name: TestingNet2
+    purge_networks: yes
+
+- name: Remove container from all networks
+  community.docker.docker_container:
+    name: sleepy
+    purge_networks: yes
+
+- name: Start a container and use an env file
+  community.docker.docker_container:
+    name: agent
+    image: jenkinsci/ssh-slave
+    env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+  community.docker.docker_container:
+    name: sleepy
+    image: ubuntu:16.04
+    command: sleep infinity
+    capabilities:
+      - sys_time
+    cap_drop:
+      - all
+
+- name: Finer container restart/update control
+  community.docker.docker_container:
+    name: test
+    image: ubuntu:18.04
+    env:
+      arg1: "true"
+      arg2: "whatever"
+    volumes:
+      - /tmp:/tmp
+    comparisons:
+      image: ignore   # don't restart containers with older versions of the image
+      env: strict   # we want precisely this environment
+      volumes: allow_more_present   # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+  community.docker.docker_container:
+    name: test
+    image: ubuntu:18.04
+    env:
+      arg1: "true"
+      arg2: "whatever"
+    comparisons:
+      '*': ignore  # by default, ignore *all* options (including image)
+      env: strict  # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+  community.docker.docker_container:
+    name: nginx-proxy
+    image: nginx:1.13
+    state: started
+    healthcheck:
+      # Check if nginx server is healthy by curl'ing the server.
+      # If this fails or times out, the healthcheck fails.
+      test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+      interval: 1m30s
+      timeout: 10s
+      retries: 3
+      start_period: 30s
+
+- name: Remove healthcheck from container
+  community.docker.docker_container:
+    name: nginx-proxy
+    image: nginx:1.13
+    state: started
+    healthcheck:
+      # The "NONE" check needs to be specified
+      test: ["NONE"]
+
+- name: Start container with block device read limit
+  community.docker.docker_container:
+    name: test
+    image: ubuntu:18.04
+    state: started
+    device_read_bps:
+      # Limit read rate for /dev/sda to 20 mebibytes per second
+      - path: /dev/sda
+        rate: 20M
+    device_read_iops:
+      # Limit read rate for /dev/sdb to 300 IO per second
+      - path: /dev/sdb
+        rate: 300
+
+- name: Start container with GPUs
+  community.docker.docker_container:
+    name: test
+    image: ubuntu:18.04
+    state: started
+    device_requests:
+      - # Add some specific devices to this container
+        device_ids:
+          - '0'
+          - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+      - # Add NVIDIA GPUs to this container
+        driver: nvidia
+        count: -1  # this means we want all
+        capabilities:
+          # One OR entry that requires both 'gpu' AND 'utility'
+          - - gpu
+            - utility
+          # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+          # for a list of capabilities supported by the nvidia driver
+'''
+
+RETURN = '''
+container:
+    description:
+      - Facts representing the current state of the container. Matches the docker inspection output.
+      - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+        conflicts with the connection plugin.
+      - Empty if I(state) is C(absent).
+      - If I(detach) is C(false), will include C(Output) attribute containing any output from container run.
+    returned: always
+    type: dict
+    sample: '{
+        "AppArmorProfile": "",
+        "Args": [],
+        "Config": {
+            "AttachStderr": false,
+            "AttachStdin": false,
+            "AttachStdout": false,
+            "Cmd": [
+                "/usr/bin/supervisord"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": {
+                "443/tcp": {},
+                "80/tcp": {}
+            },
+            "Hostname": "8e47bf643eb9",
+            "Image": "lnmp_nginx:v1",
+            "Labels": {},
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": {
+                "/tmp/lnmp/nginx-sites/logs/": {}
+            },
+            ...
+    }'
+status:
+    description:
+      - In case a container is started without detaching, this contains the exit code of the process in the container.
+      - Before community.docker 1.1.0, this was only returned when non-zero.
+    returned: when I(state) is C(started) and I(detach) is C(false), and when waiting for the container result did not fail
+    type: int
+    sample: 0
+'''
+
+import os
+import re
+import shlex
+import traceback
+from distutils.version import LooseVersion
+from time import sleep
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    AnsibleDockerClient,
+    DifferenceTracker,
+    DockerBaseClass,
+    compare_generic,
+    is_image_name_id,
+    sanitize_result,
+    clean_dict_booleans_for_docker_api,
+    omit_none_from_dict,
+    parse_healthcheck,
+    DOCKER_COMMON_ARGS,
+    RequestException,
+)
+
+try:
+    from docker import utils
+    from ansible_collections.community.docker.plugins.module_utils.common import docker_version
+    if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
+        from docker.types import Ulimit, LogConfig
+        from docker import types as docker_types
+    else:
+        from docker.utils.types import Ulimit, LogConfig
+    from docker.errors import DockerException, APIError, NotFound
+except Exception:
+    # a missing Docker SDK for Python is handled in
+    # ansible_collections.community.docker.plugins.module_utils.common
+    pass
+
+
+REQUIRES_CONVERSION_TO_BYTES = [
+    'kernel_memory',
+    'memory',
+    'memory_reservation',
+    'memory_swap',
+    'shm_size'
+]
+
+
+def is_volume_permissions(mode):
+    for part in mode.split(','):
+        if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+            return False
+    return True
+
+
+def parse_port_range(range_or_port, client):
+    '''
+    Parses a string containing either a single port or a range of ports.
+
+    Returns a list of integers for each port in the list.
+    '''
+    if '-' in range_or_port:
+        try:
+            start, end = [int(port) for port in range_or_port.split('-')]
+        except Exception:
+            client.fail('Invalid port range: "{0}"'.format(range_or_port))
+        if end < start:
+            client.fail('Invalid port range: "{0}"'.format(range_or_port))
+        return list(range(start, end + 1))
+    else:
+        try:
+            return [int(range_or_port)]
+        except Exception:
+            client.fail('Invalid port: "{0}"'.format(range_or_port))
+
+
+def split_colon_ipv6(text, client):
+    '''
+    Split string by ':', while keeping IPv6 addresses in square brackets in one component.
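+
+    For example, '[2001:db8::1]:80:80' is split into ['[2001:db8::1]', '80', '80'].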
+ ''' + if '[' not in text: + return text.split(':') + start = 0 + result = [] + while start < len(text): + i = text.find('[', start) + if i < 0: + result.extend(text[start:].split(':')) + break + j = text.find(']', i) + if j < 0: + client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + result.extend(text[start:i].split(':')) + k = text.find(':', j) + if k < 0: + result[-1] += text[i:] + start = len(text) + else: + result[-1] += text[i:k] + if k == len(text): + result.append('') + break + start = k + 1 + return result + + +class TaskParameters(DockerBaseClass): + ''' + Access and parse module parameters + ''' + + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.auto_remove = None + self.blkio_weight = None + self.capabilities = None + self.cap_drop = None + self.cleanup = None + self.command = None + self.cpu_period = None + self.cpu_quota = None + self.cpus = None + self.cpuset_cpus = None + self.cpuset_mems = None + self.cpu_shares = None + self.debug = None + self.default_host_ip = None + self.detach = None + self.devices = None + self.device_read_bps = None + self.device_write_bps = None + self.device_read_iops = None + self.device_write_iops = None + self.device_requests = None + self.dns_servers = None + self.dns_opts = None + self.dns_search_domains = None + self.domainname = None + self.env = None + self.env_file = None + self.entrypoint = None + self.etc_hosts = None + self.exposed_ports = None + self.force_kill = None + self.groups = None + self.healthcheck = None + self.hostname = None + self.ignore_image = None + self.image = None + self.init = None + self.interactive = None + self.ipc_mode = None + self.keep_volumes = None + self.kernel_memory = None + self.kill_signal = None + self.labels = None + self.links = None + self.log_driver = None + self.output_logs = None + self.log_options = None + self.mac_address = None + self.memory = None + self.memory_reservation = None + self.memory_swap = None + self.memory_swappiness = None + self.mounts = None + self.name = None + self.network_mode = None + self.userns_mode = None + self.networks = None + self.networks_cli_compatible = None + self.oom_killer = None + self.oom_score_adj = None + self.paused = None + self.pid_mode = None + self.pids_limit = None + self.privileged = None + self.purge_networks = None + self.pull = None + self.read_only = None + self.recreate = None + self.removal_wait_timeout = None + self.restart = None + self.restart_retries = None + self.restart_policy = None + self.runtime = None + self.shm_size = None + self.security_opts = None + self.state = None + self.stop_signal = None + self.stop_timeout = None + self.tmpfs = None + self.tty = None + self.user = None + self.uts = None + self.volumes = None + self.volume_binds = dict() + self.volumes_from = None + self.volume_driver = None + self.working_dir = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + self.comparisons = client.comparisons + + # If state is 'absent', parameters do not have to be parsed or interpreted. + # Only the container's name is needed. 
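+        # Everything below the early return is therefore only evaluated when the
+        # container is supposed to exist (state 'present', 'started' or 'stopped').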
+ if self.state == 'absent': + return + + if self.default_host_ip: + valid_ip = False + if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.default_host_ip): + valid_ip = True + if re.match(r'^\[[0-9a-fA-F:]+\]$', self.default_host_ip): + valid_ip = True + if re.match(r'^[0-9a-fA-F:]+$', self.default_host_ip): + self.default_host_ip = '[{0}]'.format(self.default_host_ip) + valid_ip = True + if not valid_ip: + self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' + 'or an IPv6 address. Got "{0}" instead.'.format(self.default_host_ip)) + + if self.cpus is not None: + self.cpus = int(round(self.cpus * 1E9)) + + if self.groups: + # In case integers are passed as groups, we need to convert them to + # strings as docker internally treats them as strings. + self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups] + + for param_name in REQUIRES_CONVERSION_TO_BYTES: + if client.module.params.get(param_name): + try: + setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) + except ValueError as exc: + self.fail("Failed to convert %s to bytes: %s" % (param_name, exc)) + + self.publish_all_ports = False + self.published_ports = self._parse_publish_ports() + if self.published_ports == 'all': + self.publish_all_ports = True + self.published_ports = None + + self.ports = self._parse_exposed_ports(self.published_ports) + self.log("expose ports:") + self.log(self.ports, pretty_print=True) + + self.links = self._parse_links(self.links) + + if self.volumes: + self.volumes = self._expand_host_paths() + + self.tmpfs = self._parse_tmpfs() + self.env = self._get_environment() + self.ulimits = self._parse_ulimits() + self.sysctls = self._parse_sysctls() + self.log_config = self._parse_log_config() + try: + self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck) + except ValueError as e: + self.fail(to_native(e)) + + self.exp_links = None + self.volume_binds = self._get_volume_binds(self.volumes) + self.pid_mode = self._replace_container_names(self.pid_mode) + self.ipc_mode = self._replace_container_names(self.ipc_mode) + self.network_mode = self._replace_container_names(self.network_mode) + + self.log("volumes:") + self.log(self.volumes, pretty_print=True) + self.log("volume binds:") + self.log(self.volume_binds, pretty_print=True) + + if self.networks: + for network in self.networks: + network['id'] = self._get_network_id(network['name']) + if not network['id']: + self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name']) + if network.get('links'): + network['links'] = self._parse_links(network['links']) + + if self.mac_address: + # Ensure the MAC address uses colons instead of hyphens for later comparison + self.mac_address = self.mac_address.replace('-', ':') + + if self.entrypoint: + # convert from list to str. 
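+            # e.g. ['/bin/sh', '-c', 'sleep 1d'] becomes '/bin/sh -c sleep 1d'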
+ self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint]) + + if self.command: + # convert from list to str + if isinstance(self.command, list): + self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command]) + + self.mounts_opt, self.expected_mounts = self._process_mounts() + + self._check_mount_target_collisions() + + for param_name in ["device_read_bps", "device_write_bps"]: + if client.module.params.get(param_name): + self._process_rate_bps(option=param_name) + + for param_name in ["device_read_iops", "device_write_iops"]: + if client.module.params.get(param_name): + self._process_rate_iops(option=param_name) + + if self.device_requests: + for dr_index, dr in enumerate(self.device_requests): + # Make sure that capabilities are lists of lists of strings + if dr['capabilities']: + for or_index, or_list in enumerate(dr['capabilities']): + for and_index, and_term in enumerate(or_list): + if not isinstance(and_term, string_types): + self.fail( + "device_requests[{0}].capabilities[{1}][{2}] is not a string".format( + dr_index, or_index, and_index)) + or_list[and_index] = to_native(and_term) + # Make sure that options is a dictionary mapping strings to strings + if dr['options']: + dr['options'] = clean_dict_booleans_for_docker_api(dr['options']) + + def fail(self, msg): + self.client.fail(msg) + + @property + def update_parameters(self): + ''' + Returns parameters used to update a container + ''' + + update_parameters = dict( + blkio_weight='blkio_weight', + cpu_period='cpu_period', + cpu_quota='cpu_quota', + cpu_shares='cpu_shares', + cpuset_cpus='cpuset_cpus', + cpuset_mems='cpuset_mems', + mem_limit='memory', + mem_reservation='memory_reservation', + memswap_limit='memory_swap', + kernel_memory='kernel_memory', + restart_policy='restart_policy', + ) + + result = dict() + for key, value in update_parameters.items(): + if getattr(self, value, None) is not None: + if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']: + restart_policy = dict(Name=self.restart_policy, + MaximumRetryCount=self.restart_retries) + result[key] = restart_policy + elif self.client.option_minimal_versions[value]['supported']: + result[key] = getattr(self, value) + return result + + @property + def create_parameters(self): + ''' + Returns parameters used to create a container + ''' + create_params = dict( + command='command', + domainname='domainname', + hostname='hostname', + user='user', + detach='detach', + stdin_open='interactive', + tty='tty', + ports='ports', + environment='env', + name='name', + entrypoint='entrypoint', + mac_address='mac_address', + labels='labels', + stop_signal='stop_signal', + working_dir='working_dir', + stop_timeout='stop_timeout', + healthcheck='healthcheck', + ) + + if self.client.docker_py_version < LooseVersion('3.0'): + # cpu_shares and volume_driver moved to create_host_config in > 3 + create_params['cpu_shares'] = 'cpu_shares' + create_params['volume_driver'] = 'volume_driver' + + result = dict( + host_config=self._host_config(), + volumes=self._get_mounts(), + ) + + for key, value in create_params.items(): + if getattr(self, value, None) is not None: + if self.client.option_minimal_versions[value]['supported']: + result[key] = getattr(self, value) + + if self.disable_healthcheck: + # Make sure image's health check is overridden + result['healthcheck'] = {'test': ['NONE']} + + if self.networks_cli_compatible and self.networks: + network = self.networks[0] + params = dict() + for 
para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): + if network.get(para): + params[para] = network[para] + network_config = dict() + network_config[network['name']] = self.client.create_endpoint_config(**params) + result['networking_config'] = self.client.create_networking_config(network_config) + return result + + def _expand_host_paths(self): + new_vols = [] + for vol in self.volumes: + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + self.fail('Found invalid volumes mode: {0}'.format(mode)) + if re.match(r'[.~]', host): + host = os.path.abspath(os.path.expanduser(host)) + new_vols.append("%s:%s:%s" % (host, container, mode)) + continue + elif len(parts) == 2: + if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): + host = os.path.abspath(os.path.expanduser(parts[0])) + new_vols.append("%s:%s:rw" % (host, parts[1])) + continue + new_vols.append(vol) + return new_vols + + def _get_mounts(self): + ''' + Return a list of container mounts. + :return: + ''' + result = [] + if self.volumes: + for vol in self.volumes: + # Only pass anonymous volumes to create container + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + continue + result.append(vol) + self.log("mounts:") + self.log(result, pretty_print=True) + return result + + def _host_config(self): + ''' + Returns parameters used to create a HostConfig object + ''' + + host_config_params = dict( + port_bindings='published_ports', + publish_all_ports='publish_all_ports', + links='links', + privileged='privileged', + cgroup_parent='cgroup_parent', + dns='dns_servers', + dns_opt='dns_opts', + dns_search='dns_search_domains', + binds='volume_binds', + volumes_from='volumes_from', + network_mode='network_mode', + userns_mode='userns_mode', + cap_add='capabilities', + cap_drop='cap_drop', + extra_hosts='etc_hosts', + read_only='read_only', + ipc_mode='ipc_mode', + security_opt='security_opts', + ulimits='ulimits', + sysctls='sysctls', + log_config='log_config', + mem_limit='memory', + memswap_limit='memory_swap', + mem_swappiness='memory_swappiness', + oom_score_adj='oom_score_adj', + oom_kill_disable='oom_killer', + shm_size='shm_size', + group_add='groups', + devices='devices', + pid_mode='pid_mode', + tmpfs='tmpfs', + init='init', + uts_mode='uts', + runtime='runtime', + auto_remove='auto_remove', + device_read_bps='device_read_bps', + device_write_bps='device_write_bps', + device_read_iops='device_read_iops', + device_write_iops='device_write_iops', + pids_limit='pids_limit', + mounts='mounts', + nano_cpus='cpus', + ) + + if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'): + # blkio_weight can always be updated, but can only be set on creation + # when Docker SDK for Python and Docker API are new enough + host_config_params['blkio_weight'] = 'blkio_weight' + + if self.client.docker_py_version >= LooseVersion('3.0'): + # cpu_shares and volume_driver moved to create_host_config in > 3 + host_config_params['cpu_shares'] = 'cpu_shares' + host_config_params['volume_driver'] = 'volume_driver' + + params = dict() + for key, value in host_config_params.items(): + if getattr(self, value, None) is not None: + if self.client.option_minimal_versions[value]['supported']: + params[key] = getattr(self, value) + + if self.restart_policy: + params['restart_policy'] = 
dict(Name=self.restart_policy, + MaximumRetryCount=self.restart_retries) + + if 'mounts' in params: + params['mounts'] = self.mounts_opt + + if self.device_requests is not None: + params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests] + + return self.client.create_host_config(**params) + + def get_default_host_ip(self): + if self.default_host_ip is not None: + return self.default_host_ip + ip = '0.0.0.0' + if not self.networks: + return ip + for net in self.networks: + if net.get('name'): + try: + network = self.client.inspect_network(net['name']) + if network.get('Driver') == 'bridge' and \ + network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): + ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] + break + except NotFound as nfe: + self.client.fail( + "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe), + exception=traceback.format_exc() + ) + return ip + + def _parse_publish_ports(self): + ''' + Parse ports from docker CLI syntax + ''' + if self.published_ports is None: + return None + + if 'all' in self.published_ports: + if len(self.published_ports) > 1: + self.client.module.deprecate( + 'Specifying "all" in published_ports together with port mappings is not properly ' + 'supported by the module. The port mappings are currently ignored. Please specify ' + 'only port mappings, or the value "all". The behavior for mixed usage will either ' + 'be forbidden in version 2.0.0, or properly handled. In any case, the way you ' + 'currently use the module will change in a breaking way', + collection_name='community.docker', version='2.0.0') + return 'all' + + default_ip = self.get_default_host_ip() + + binds = {} + for port in self.published_ports: + parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client) + container_port = parts[-1] + protocol = '' + if '/' in container_port: + container_port, protocol = parts[-1].split('/') + container_ports = parse_port_range(container_port, self.client) + + p_len = len(parts) + if p_len == 1: + port_binds = len(container_ports) * [(default_ip,)] + elif p_len == 2: + if len(container_ports) == 1: + port_binds = [(default_ip, parts[0])] + else: + port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)] + elif p_len == 3: + # We only allow IPv4 and IPv6 addresses for the bind address + ipaddr = parts[0] + if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): + self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' + 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr)) + if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): + ipaddr = ipaddr[1:-1] + if parts[1]: + if len(container_ports) == 1: + port_binds = [(ipaddr, parts[1])] + else: + port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)] + else: + port_binds = len(container_ports) * [(ipaddr,)] + + for bind, container_port in zip(port_binds, container_ports): + idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port + if idx in binds: + old_bind = binds[idx] + if isinstance(old_bind, list): + old_bind.append(bind) + else: + binds[idx] = [old_bind, bind] + else: + binds[idx] = bind + return binds + + def _get_volume_binds(self, volumes): + ''' + Extract host bindings, if any, from list of volume mapping strings. 
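+
+        For example, '/srv/data:/data:ro' yields
+        {'/srv/data': {'bind': '/data', 'mode': 'ro'}}.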
+
+        :return: dictionary of bind mappings
+        '''
+        result = dict()
+        if volumes:
+            for vol in volumes:
+                host = None
+                if ':' in vol:
+                    parts = vol.split(':')
+                    if len(parts) == 3:
+                        host, container, mode = parts
+                        if not is_volume_permissions(mode):
+                            self.fail('Found invalid volumes mode: {0}'.format(mode))
+                    elif len(parts) == 2:
+                        if not is_volume_permissions(parts[1]):
+                            host, container, mode = (parts + ['rw'])
+                if host is not None:
+                    result[host] = dict(
+                        bind=container,
+                        mode=mode
+                    )
+        return result
+
+    def _parse_exposed_ports(self, published_ports):
+        '''
+        Parse exposed ports from docker CLI-style ports syntax.
+        '''
+        exposed = []
+        if self.exposed_ports:
+            for port in self.exposed_ports:
+                port = to_text(port, errors='surrogate_or_strict').strip()
+                protocol = 'tcp'
+                match = re.search(r'(/.+$)', port)
+                if match:
+                    protocol = match.group(1).replace('/', '')
+                    port = re.sub(r'/.+$', '', port)
+                exposed.append((port, protocol))
+        if published_ports:
+            # Any published port should also be exposed
+            for publish_port in published_ports:
+                match = False
+                if isinstance(publish_port, string_types) and '/' in publish_port:
+                    port, protocol = publish_port.split('/')
+                    port = int(port)
+                else:
+                    protocol = 'tcp'
+                    port = int(publish_port)
+                for exposed_port in exposed:
+                    if exposed_port[1] != protocol:
+                        continue
+                    if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
+                        start_port, end_port = exposed_port[0].split('-')
+                        if int(start_port) <= port <= int(end_port):
+                            match = True
+                    elif exposed_port[0] == port:
+                        match = True
+                if not match:
+                    exposed.append((port, protocol))
+        return exposed
+
+    @staticmethod
+    def _parse_links(links):
+        '''
+        Turn links into a list of (name, alias) tuples
+        '''
+        if links is None:
+            return None
+
+        result = []
+        for link in links:
+            parsed_link = link.split(':', 1)
+            if len(parsed_link) == 2:
+                result.append((parsed_link[0], parsed_link[1]))
+            else:
+                result.append((parsed_link[0], parsed_link[0]))
+        return result
+
+    def _parse_ulimits(self):
+        '''
+        Turn ulimits into an array of Ulimit objects
+        '''
+        if self.ulimits is None:
+            return None
+
+        results = []
+        for limit in self.ulimits:
+            limits = dict()
+            pieces = limit.split(':')
+            if len(pieces) >= 2:
+                limits['name'] = pieces[0]
+                limits['soft'] = int(pieces[1])
+                limits['hard'] = int(pieces[1])
+            if len(pieces) == 3:
+                limits['hard'] = int(pieces[2])
+            try:
+                results.append(Ulimit(**limits))
+            except ValueError as exc:
+                self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+        return results
+
+    def _parse_sysctls(self):
+        '''
+        Sysctls are passed through to the daemon as a plain dict.
+        '''
+        return self.sysctls
+
+    def _parse_log_config(self):
+        '''
+        Create a LogConfig object
+        '''
+        if self.log_driver is None:
+            return None
+
+        options = dict(
+            Type=self.log_driver,
+            Config=dict()
+        )
+
+        if self.log_options is not None:
+            options['Config'] = dict()
+            for k, v in self.log_options.items():
+                if not isinstance(v, string_types):
+                    self.client.module.warn(
+                        "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+                        "If this is not correct, or you want to avoid such warnings, please quote the value."
                        % (
+                            k, to_text(v, errors='surrogate_or_strict'))
+                    )
+                    v = to_text(v, errors='surrogate_or_strict')
+                    self.log_options[k] = v
+                options['Config'][k] = v
+
+        try:
+            return LogConfig(**options)
+        except ValueError as exc:
+            self.fail('Error parsing logging options - %s' % (exc))
+
+    def _parse_tmpfs(self):
+        '''
+        Turn tmpfs into a hash of Tmpfs objects
+        '''
+        result = dict()
+        if self.tmpfs is None:
+            return result
+
+        for tmpfs_spec in self.tmpfs:
+            split_spec = tmpfs_spec.split(":", 1)
+            if len(split_spec) > 1:
+                result[split_spec[0]] = split_spec[1]
+            else:
+                result[split_spec[0]] = ""
+        return result
+
+    def _get_environment(self):
+        """
+        If an environment file is combined with explicit environment variables, the explicit environment variables
+        take precedence.
+        """
+        final_env = {}
+        if self.env_file:
+            parsed_env_file = utils.parse_env_file(self.env_file)
+            for name, value in parsed_env_file.items():
+                final_env[name] = to_text(value, errors='surrogate_or_strict')
+        if self.env:
+            for name, value in self.env.items():
+                if not isinstance(value, string_types):
+                    self.fail("Non-string value found for env option. Ambiguous env options must be "
+                              "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+                final_env[name] = to_text(value, errors='surrogate_or_strict')
+        return final_env
+
+    def _get_network_id(self, network_name):
+        network_id = None
+        try:
+            for network in self.client.networks(names=[network_name]):
+                if network['Name'] == network_name:
+                    network_id = network['Id']
+                    break
+        except Exception as exc:
+            self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+        return network_id
+
+    def _process_mounts(self):
+        if self.mounts is None:
+            return None, None
+        mounts_list = []
+        mounts_expected = []
+        for mount in self.mounts:
+            target = mount['target']
+            datatype = mount['type']
+            mount_dict = dict(mount)
+            # Sanity checks (so we don't wait for docker-py to barf on input)
+            if mount_dict.get('source') is None and datatype != 'tmpfs':
+                self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
+            mount_option_types = dict(
+                volume_driver='volume',
+                volume_options='volume',
+                propagation='bind',
+                no_copy='volume',
+                labels='volume',
+                tmpfs_size='tmpfs',
+                tmpfs_mode='tmpfs',
+            )
+            for option, req_datatype in mount_option_types.items():
+                if mount_dict.get(option) is not None and datatype != req_datatype:
+                    self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
+            # Handle volume_driver and volume_options
+            volume_driver = mount_dict.pop('volume_driver')
+            volume_options = mount_dict.pop('volume_options')
+            if volume_driver:
+                if volume_options:
+                    volume_options = clean_dict_booleans_for_docker_api(volume_options)
+                mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
+            if mount_dict['labels']:
+                mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+            if mount_dict.get('tmpfs_size') is not None:
+                try:
+                    mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+                except ValueError as exc:
+                    self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
+            if mount_dict.get('tmpfs_mode') is not None:
+                try:
+                    mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+                except Exception as dummy:
+                    self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+            # Fill expected mount dict
+            mount_expected = dict(mount)
+            mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
+            mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
+            # Add result to lists
+            mounts_list.append(docker_types.Mount(**mount_dict))
+            mounts_expected.append(omit_none_from_dict(mount_expected))
+        return mounts_list, mounts_expected
+
+    def _process_rate_bps(self, option):
+        """
+        Format device_read_bps and device_write_bps option
+        """
+        devices_list = []
+        for v in getattr(self, option):
+            device_dict = dict((x.title(), y) for x, y in v.items())
+            device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
+            devices_list.append(device_dict)
+
+        setattr(self, option, devices_list)
+
+    def _process_rate_iops(self, option):
+        """
+        Format device_read_iops and device_write_iops option
+        """
+        devices_list = []
+        for v in getattr(self, option):
+            device_dict = dict((x.title(), y) for x, y in v.items())
+            devices_list.append(device_dict)
+
+        setattr(self, option, devices_list)
+
+    def _replace_container_names(self, mode):
+        """
+        Parse IPC and PID modes. If they contain a container name, replace
+        with the container's ID.
+        """
+        if mode is None or not mode.startswith('container:'):
+            return mode
+        container_name = mode[len('container:'):]
+        # Try to inspect container to see whether this is an ID or a
+        # name (and in the latter case, retrieve its ID)
+        container = self.client.get_container(container_name)
+        if container is None:
+            # If we can't find the container, issue a warning and continue with
+            # what the user specified.
+            self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+            return mode
+        return 'container:{0}'.format(container['Id'])
+
+    def _check_mount_target_collisions(self):
+        last = dict()
+
+        def f(t, name):
+            if t in last:
+                if name == last[t]:
+                    self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+                else:
+                    self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+            last[t] = name
+
+        if self.expected_mounts:
+            for t in [m['target'] for m in self.expected_mounts]:
+                f(t, 'mounts')
+        if self.volumes:
+            for v in self.volumes:
+                vs = v.split(':')
+                f(vs[0 if len(vs) == 1 else 1], 'volumes')
+
+
+class Container(DockerBaseClass):
+
+    def __init__(self, container, parameters):
+        super(Container, self).__init__()
+        self.raw = container
+        self.Id = None
+        self.container = container
+        if container:
+            self.Id = container['Id']
+            self.Image = container['Image']
+        self.log(self.container, pretty_print=True)
+        self.parameters = parameters
+        self.parameters.expected_links = None
+        self.parameters.expected_ports = None
+        self.parameters.expected_exposed = None
+        self.parameters.expected_volumes = None
+        self.parameters.expected_ulimits = None
+        self.parameters.expected_sysctls = None
+        self.parameters.expected_etc_hosts = None
+        self.parameters.expected_env = None
+        self.parameters.expected_device_requests = None
+        self.parameters_map = dict()
+        self.parameters_map['expected_links'] = 'links'
+        self.parameters_map['expected_ports'] = 'expected_ports'
+        self.parameters_map['expected_exposed'] = 'exposed_ports'
+        self.parameters_map['expected_volumes'] = 'volumes'
+        self.parameters_map['expected_ulimits'] = 'ulimits'
+        self.parameters_map['expected_sysctls'] = 'sysctls'
+        self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+        self.parameters_map['expected_env'] = 'env'
+        self.parameters_map['expected_entrypoint'] = 'entrypoint'
self.parameters_map['expected_binds'] = 'volumes' + self.parameters_map['expected_cmd'] = 'command' + self.parameters_map['expected_devices'] = 'devices' + self.parameters_map['expected_healthcheck'] = 'healthcheck' + self.parameters_map['expected_mounts'] = 'mounts' + self.parameters_map['expected_device_requests'] = 'device_requests' + + def fail(self, msg): + self.parameters.client.fail(msg) + + @property + def exists(self): + return True if self.container else False + + @property + def removing(self): + if self.container and self.container.get('State'): + return self.container['State'].get('Status') == 'removing' + return False + + @property + def running(self): + if self.container and self.container.get('State'): + if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): + return True + return False + + @property + def paused(self): + if self.container and self.container.get('State'): + return self.container['State'].get('Paused', False) + return False + + def _compare(self, a, b, compare): + ''' + Compare values a and b as described in compare. + ''' + return compare_generic(a, b, compare['comparison'], compare['type']) + + def _decode_mounts(self, mounts): + if not mounts: + return mounts + result = [] + empty_dict = dict() + for mount in mounts: + res = dict() + res['type'] = mount.get('Type') + res['source'] = mount.get('Source') + res['target'] = mount.get('Target') + res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False + res['consistency'] = mount.get('Consistency') + res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation') + res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False) + res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict) + res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name') + res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict) + res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes') + res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode') + result.append(res) + return result + + def has_different_configuration(self, image): + ''' + Diff parameters vs existing container config. 
Returns tuple: (True | False, List of differences)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+ self.parameters.expected_healthcheck = self._get_expected_healthcheck()
+ self.parameters.expected_device_requests = self._get_expected_device_requests()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_different_configuration: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_different_configuration: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_different_configuration: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming that if the container was running, it must have been detached.
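+ # For example, a container started with "docker run -d" is inspected with
+ # AttachStdout == AttachStderr == False and thus yields detach == True, while
+ # an attached run sets both flags and yields detach == False.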
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ expected_cmd=config.get('Cmd'),
+ domainname=config.get('Domainname'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ init=host_config.get('Init'),
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ cap_drop=host_config.get('CapDrop'),
+ cgroup_parent=host_config.get('CgroupParent'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ mac_address=config.get('MacAddress', network.get('MacAddress')),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ userns_mode=host_config.get('UsernsMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ runtime=host_config.get('Runtime'),
+ shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecurityOpt"),
+ stop_signal=config.get("StopSignal"),
+ tmpfs=host_config.get('Tmpfs'),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ expected_sysctls=host_config.get('Sysctls'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volume_driver=host_config.get('VolumeDriver'),
+ volumes_from=host_config.get('VolumesFrom'),
+ working_dir=config.get('WorkingDir'),
+ publish_all_ports=host_config.get('PublishAllPorts'),
+ expected_healthcheck=config.get('Healthcheck'),
+ disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
+ device_read_bps=host_config.get('BlkioDeviceReadBps'),
+ device_write_bps=host_config.get('BlkioDeviceWriteBps'),
+ device_read_iops=host_config.get('BlkioDeviceReadIOps'),
+ device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
+ expected_device_requests=host_config.get('DeviceRequests'),
+ pids_limit=host_config.get('PidsLimit'),
+ # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+ # has been included at least since v17.03.0-ce, which has API version 1.26.
+ # The previous tag, v1.9.1, has API version 1.21 and does not have
+ # HostConfig.Mounts. Whether API versions in between (such as 1.25)
+ # support it is unclear.
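+ # (The mounts option itself is version-gated elsewhere: option_minimal_versions
+ # below requires Docker SDK for Python >= 2.6.0 and API >= 1.25 to use it.)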
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')), + cpus=host_config.get('NanoCpus'), + ) + # Options which don't make sense without their accompanying option + if self.parameters.log_driver: + config_mapping['log_driver'] = log_config.get('Type') + config_mapping['log_options'] = log_config.get('Config') + + if self.parameters.client.option_minimal_versions['auto_remove']['supported']: + # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately + # it has a default value, that's why we have to jump through the hoops here + config_mapping['auto_remove'] = host_config.get('AutoRemove') + + if self.parameters.client.option_minimal_versions['stop_timeout']['supported']: + # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that + # stop_timeout has a hybrid role, in that it used to be something only used + # for stopping containers, and is now also used as a container property. + # That's why it needs special handling here. + config_mapping['stop_timeout'] = config.get('StopTimeout') + + if self.parameters.client.docker_api_version < LooseVersion('1.22'): + # For docker API < 1.22, update_container() is not supported. Thus + # we need to handle all limits which are usually handled by + # update_container() as configuration changes which require a container + # restart. + restart_policy = host_config.get('RestartPolicy', dict()) + + # Options which don't make sense without their accompanying option + if self.parameters.restart_policy: + config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount') + + config_mapping.update(dict( + blkio_weight=host_config.get('BlkioWeight'), + cpu_period=host_config.get('CpuPeriod'), + cpu_quota=host_config.get('CpuQuota'), + cpu_shares=host_config.get('CpuShares'), + cpuset_cpus=host_config.get('CpusetCpus'), + cpuset_mems=host_config.get('CpusetMems'), + kernel_memory=host_config.get("KernelMemory"), + memory=host_config.get('Memory'), + memory_reservation=host_config.get('MemoryReservation'), + memory_swap=host_config.get('MemorySwap'), + restart_policy=restart_policy.get('Name') + )) + + differences = DifferenceTracker() + for key, value in config_mapping.items(): + minimal_version = self.parameters.client.option_minimal_versions.get(key, {}) + if not minimal_version.get('supported', True): + continue + compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] + self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare)) + if getattr(self.parameters, key, None) is not None: + match = self._compare(getattr(self.parameters, key), value, compare) + + if not match: + # no match. record the differences + p = getattr(self.parameters, key) + c = value + if compare['type'] == 'set': + # Since the order does not matter, sort so that the diff output is better. + if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif compare['type'] == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if key == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. 
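+ # Example: {'Name': 'nofile', 'Soft': 1024, 'Hard': 2048} becomes the key
+ # [('Hard', '2048'), ('Name', 'nofile'), ('Soft', '1024')], so dicts with the
+ # same items compare equal regardless of key order or value type (int vs str).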
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(key, parameter=p, active=c)
+
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("has_different_resource_limits: Error parsing container properties. HostConfig missing.")
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # update_container() call not supported
+ return False, []
+
+ host_config = self.container['HostConfig']
+
+ restart_policy = host_config.get('RestartPolicy') or dict()
+
+ config_mapping = dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ )
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None):
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ differences.add(key, parameter=getattr(self.parameters, key), active=value)
+ different = not differences.empty
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_network_differences: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = self.container['NetworkSettings']['Networks'] + for network in self.parameters.networks: + network_info = connected_networks.get(network['name']) + if network_info is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + network_info_ipam = network_info.get('IPAMConfig') or {} + if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): + diff = True + if network.get('aliases'): + if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): + diff = True + if network.get('links'): + expected_links = [] + for link, alias in network['links']: + expected_links.append("%s:%s" % (link, alias)) + if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + ipv4_address=network_info_ipam.get('IPv4Address'), + ipv6_address=network_info_ipam.get('IPv6Address'), + aliases=network_info.get('Aliases'), + links=network_info.get('Links') + ) + )) + return different, differences + + def has_extra_networks(self): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not self.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.") + + connected_networks = self.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.items(): + keep = False + if self.parameters.networks: + for expected_network in self.parameters.networks: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + + def _get_expected_devices(self): + if not self.parameters.devices: + return None + expected_devices = [] + for device in self.parameters.devices: + parts = device.split(':') + if len(parts) == 1: + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[0], + PathOnHost=parts[0] + )) + elif len(parts) == 2: + parts = device.split(':') + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[1], + PathOnHost=parts[0] + ) + ) + else: + expected_devices.append( + dict( + CgroupPermissions=parts[2], + PathInContainer=parts[1], + PathOnHost=parts[0] + )) + return expected_devices + + def _get_expected_entrypoint(self): + if not self.parameters.entrypoint: + return None + return shlex.split(self.parameters.entrypoint) + + def _get_expected_ports(self): + if self.parameters.published_ports is None: + return None + expected_bound_ports = {} + for container_port, config in self.parameters.published_ports.items(): + if isinstance(container_port, int): + container_port = "%s/tcp" % container_port + if len(config) == 1: + if isinstance(config[0], int): + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for host_ip, host_port in config: + 
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}] + return expected_bound_ports + + def _get_expected_links(self): + if self.parameters.links is None: + return None + self.log('parameter links:') + self.log(self.parameters.links, pretty_print=True) + exp_links = [] + for link, alias in self.parameters.links: + exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) + return exp_links + + def _get_expected_binds(self, image): + self.log('_get_expected_binds') + image_vols = [] + if image: + image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes')) + param_vols = [] + if self.parameters.volumes: + for vol in self.parameters.volumes: + host = None + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + self.fail('Found invalid volumes mode: {0}'.format(mode)) + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + host, container, mode = parts + ['rw'] + if host: + param_vols.append("%s:%s:%s" % (host, container, mode)) + result = list(set(image_vols + param_vols)) + self.log("expected_binds:") + self.log(result, pretty_print=True) + return result + + def _get_expected_device_requests(self): + if self.parameters.device_requests is None: + return None + device_requests = [] + for dr in self.parameters.device_requests: + device_requests.append({ + 'Driver': dr['driver'], + 'Count': dr['count'], + 'DeviceIDs': dr['device_ids'], + 'Capabilities': dr['capabilities'], + 'Options': dr['options'], + }) + return device_requests + + def _get_image_binds(self, volumes): + ''' + Convert array of binds to array of strings with format host_path:container_path:mode + + :param volumes: array of bind dicts + :return: array of strings + ''' + results = [] + if isinstance(volumes, dict): + results += self._get_bind_from_dict(volumes) + elif isinstance(volumes, list): + for vol in volumes: + results += self._get_bind_from_dict(vol) + return results + + @staticmethod + def _get_bind_from_dict(volume_dict): + results = [] + if volume_dict: + for host_path, config in volume_dict.items(): + if isinstance(config, dict) and config.get('bind'): + container_path = config.get('bind') + mode = config.get('mode', 'rw') + results.append("%s:%s:%s" % (host_path, container_path, mode)) + return results + + def _get_expected_volumes(self, image): + self.log('_get_expected_volumes') + expected_vols = dict() + if image and image[self.parameters.client.image_inspect_source].get('Volumes'): + expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes')) + + if self.parameters.volumes: + for vol in self.parameters.volumes: + # We only expect anonymous volumes to show up in the list + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + continue + expected_vols[vol] = dict() + + if not expected_vols: + expected_vols = None + self.log("expected_volumes:") + self.log(expected_vols, pretty_print=True) + return expected_vols + + def _get_expected_env(self, image): + self.log('_get_expected_env') + expected_env = dict() + if image and image[self.parameters.client.image_inspect_source].get('Env'): + for env_var in 
image[self.parameters.client.image_inspect_source]['Env']: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + if self.parameters.env: + expected_env.update(self.parameters.env) + param_env = [] + for key, value in expected_env.items(): + param_env.append("%s=%s" % (key, value)) + return param_env + + def _get_expected_exposed(self, image): + self.log('_get_expected_exposed') + image_ports = [] + if image: + image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {} + image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()] + param_ports = [] + if self.parameters.ports: + param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports] + result = list(set(image_ports + param_ports)) + self.log(result, pretty_print=True) + return result + + def _get_expected_ulimits(self, config_ulimits): + self.log('_get_expected_ulimits') + if config_ulimits is None: + return None + results = [] + for limit in config_ulimits: + results.append(dict( + Name=limit.name, + Soft=limit.soft, + Hard=limit.hard + )) + return results + + def _get_expected_sysctls(self, config_sysctls): + self.log('_get_expected_sysctls') + if config_sysctls is None: + return None + result = dict() + for key, value in config_sysctls.items(): + result[key] = to_text(value, errors='surrogate_or_strict') + return result + + def _get_expected_cmd(self): + self.log('_get_expected_cmd') + if not self.parameters.command: + return None + return shlex.split(self.parameters.command) + + def _convert_simple_dict_to_list(self, param_name, join_with=':'): + if getattr(self.parameters, param_name, None) is None: + return None + results = [] + for key, value in getattr(self.parameters, param_name).items(): + results.append("%s%s%s" % (key, join_with, value)) + return results + + def _normalize_port(self, port): + if '/' not in port: + return port + '/tcp' + return port + + def _get_expected_healthcheck(self): + self.log('_get_expected_healthcheck') + expected_healthcheck = dict() + + if self.parameters.healthcheck: + expected_healthcheck.update([(k.title().replace("_", ""), v) + for k, v in self.parameters.healthcheck.items()]) + + return expected_healthcheck + + +class ContainerManager(DockerBaseClass): + ''' + Perform container management tasks + ''' + + def __init__(self, client): + + super(ContainerManager, self).__init__() + + if client.module.params.get('log_options') and not client.module.params.get('log_driver'): + client.module.warn('log_options is ignored when log_driver is not specified') + if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'): + client.module.warn('healthcheck is ignored when test is not specified') + if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'): + client.module.warn('restart_retries is ignored when restart_policy is not specified') + + self.client = client + self.parameters = TaskParameters(client) + self.check_mode = self.client.check_mode + self.results = {'changed': False, 'actions': []} + self.diff = {} + self.diff_tracker = DifferenceTracker() + self.facts = {} + + state = self.parameters.state + if state in ('stopped', 'started', 'present'): + self.present(state) + elif state == 'absent': + self.absent() + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + if self.client.module._diff or self.parameters.debug: + self.diff['before'], 
self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.client.get_container_by_id(container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed then we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether
+ # the container already runs or not; in the former case, if the
+ # container needs to be restarted, we use the existing container's
+ # image ID.
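+ # Note: image may be None here when no image parameter was given; the
+ # recreate path below then falls back to the existing container's image ID.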
+ image = self._get_image() + self.log(image, pretty_print=True) + if not container.exists or container.removing: + # New container + if container.removing: + self.log('Found container in removal phase') + else: + self.log('No container found') + if not self.parameters.image: + self.fail('Cannot create container when image is not specified!') + self.diff_tracker.add('exists', parameter=True, active=False) + if container.removing and not self.check_mode: + # Wait for container to be removed before trying to create it + self.wait_for_state( + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) + new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) + if new_container: + container = new_container + container_created = True + else: + # Existing container + different, differences = container.has_different_configuration(image) + image_different = False + if self.parameters.comparisons['image']['comparison'] == 'strict': + image_different = self._image_is_different(image, container) + if image_different or different or self.parameters.recreate: + self.diff_tracker.merge(differences) + self.diff['differences'] = differences.get_legacy_docker_container_diffs() + if image_different: + self.diff['image_different'] = True + self.log("differences") + self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) + image_to_use = self.parameters.image + if not image_to_use and container and container.Image: + image_to_use = container.Image + if not image_to_use: + self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') + if container.running: + self.container_stop(container.Id) + self.container_remove(container.Id) + if not self.check_mode: + self.wait_for_state( + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) + new_container = self.container_create(image_to_use, self.parameters.create_parameters) + if new_container: + container = new_container + container_created = True + + if container and container.exists: + container = self.update_limits(container) + container = self.update_networks(container, container_created) + + if state == 'started' and not container.running: + self.diff_tracker.add('running', parameter=True, active=was_running) + container = self.container_start(container.Id) + elif state == 'started' and self.parameters.restart: + self.diff_tracker.add('running', parameter=True, active=was_running) + self.diff_tracker.add('restarted', parameter=True, active=False) + container = self.container_restart(container.Id) + elif state == 'stopped' and container.running: + self.diff_tracker.add('running', parameter=False, active=was_running) + self.container_stop(container.Id) + container = self._get_container(container.Id) + + if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused: + self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused) + if not self.check_mode: + try: + if self.parameters.paused: + self.client.pause(container=container.Id) + else: + self.client.unpause(container=container.Id) + except Exception as exc: + self.fail("Error %s container %s: %s" % ( + "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc) + )) + container = self._get_container(container.Id) + self.results['changed'] = True + 
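+ # Record the pause/unpause action; 'actions' is kept in the result only in
+ # check mode or when debug is enabled (see __init__ above).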
self.results['actions'].append(dict(set_paused=self.parameters.paused)) + + self.facts = container.raw + + def absent(self): + container = self._get_container(self.parameters.name) + if container.exists: + if container.running: + self.diff_tracker.add('running', parameter=False, active=True) + self.container_stop(container.Id) + self.diff_tracker.add('exists', parameter=False, active=True) + self.container_remove(container.Id) + + def fail(self, msg, **kwargs): + self.client.fail(msg, **kwargs) + + def _output_logs(self, msg): + self.client.module.log(msg=msg) + + def _get_container(self, container): + ''' + Expects container ID or Name. Returns a container object + ''' + return Container(self.client.get_container(container), self.parameters) + + def _get_image(self): + if not self.parameters.image: + self.log('No image specified') + return None + if is_image_name_id(self.parameters.image): + image = self.client.find_image_by_id(self.parameters.image) + else: + repository, tag = utils.parse_repository_tag(self.parameters.image) + if not tag: + tag = "latest" + image = self.client.find_image(repository, tag) + if not image or self.parameters.pull: + if not self.check_mode: + self.log("Pull the image.") + image, alreadyToLatest = self.client.pull_image(repository, tag) + if alreadyToLatest: + self.results['changed'] = False + else: + self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + elif not image: + # If the image isn't there, claim we'll pull. + # (Implicitly: if the image is there, claim it already was latest.) + self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + + self.log("image") + self.log(image, pretty_print=True) + return image + + def _image_is_different(self, image, container): + if image and image.get('Id'): + if container and container.Image: + if image.get('Id') != container.Image: + self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image) + return True + return False + + def update_limits(self, container): + limits_differ, different_limits = container.has_different_resource_limits() + if limits_differ: + self.log("limit differences:") + self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) + self.diff_tracker.merge(different_limits) + if limits_differ and not self.check_mode: + self.container_update(container.Id, self.parameters.update_parameters) + return self._get_container(container.Id) + return container + + def update_networks(self, container, container_created): + updated_container = container + if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created: + has_network_differences, network_differences = container.has_network_differences() + if has_network_differences: + if self.diff.get('differences'): + self.diff['differences'].append(dict(network_differences=network_differences)) + else: + self.diff['differences'] = [dict(network_differences=network_differences)] + for netdiff in network_differences: + self.diff_tracker.add( + 'network.{0}'.format(netdiff['parameter']['name']), + parameter=netdiff['parameter'], + active=netdiff['container'] + ) + self.results['changed'] = True + updated_container = self._add_networks(container, network_differences) + + if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks: + has_extra_networks, extra_networks = 
container.has_extra_networks() + if has_extra_networks: + if self.diff.get('differences'): + self.diff['differences'].append(dict(purge_networks=extra_networks)) + else: + self.diff['differences'] = [dict(purge_networks=extra_networks)] + for extra_network in extra_networks: + self.diff_tracker.add( + 'network.{0}'.format(extra_network['name']), + active=extra_network + ) + self.results['changed'] = True + updated_container = self._purge_networks(container, extra_networks) + return updated_container + + def _add_networks(self, container, differences): + for diff in differences: + # remove the container from the network, if connected + if diff.get('container'): + self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) + if not self.check_mode: + try: + self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], + to_native(exc))) + # connect to the network + params = dict() + for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): + if diff['parameter'].get(para): + params[para] = diff['parameter'][para] + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) + if not self.check_mode: + try: + self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(params, pretty_print=True) + self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) + except Exception as exc: + self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) + return self._get_container(container.Id) + + def _purge_networks(self, container, networks): + for network in networks: + self.results['actions'].append(dict(removed_from_network=network['name'])) + if not self.check_mode: + try: + self.client.disconnect_container_from_network(container.Id, network['name']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (network['name'], + to_native(exc))) + return self._get_container(container.Id) + + def container_create(self, image, create_parameters): + self.log("create container") + self.log("image: %s parameters:" % image) + self.log(create_parameters, pretty_print=True) + self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) + self.results['changed'] = True + new_container = None + if not self.check_mode: + try: + new_container = self.client.create_container(image, **create_parameters) + self.client.report_warnings(new_container) + except Exception as exc: + self.fail("Error creating container: %s" % to_native(exc)) + return self._get_container(new_container['Id']) + return new_container + + def container_start(self, container_id): + self.log("start container %s" % (container_id)) + self.results['actions'].append(dict(started=container_id)) + self.results['changed'] = True + if not self.check_mode: + try: + self.client.start(container=container_id) + except Exception as exc: + self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) + + if self.parameters.detach is False: + if self.client.docker_py_version >= LooseVersion('3.0'): + status = self.client.wait(container_id)['StatusCode'] + else: + status = self.client.wait(container_id) + self.client.fail_results['status'] = status + self.results['status'] = status + + if self.parameters.auto_remove: + output = "Cannot retrieve 
result as auto_remove is enabled"
+ if self.parameters.output_logs:
+ self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ config = self.client.inspect_container(container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+
+ if logging_driver in ('json-file', 'journald'):
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if self.parameters.output_logs:
+ self._output_logs(msg=output)
+ else:
+ output = "Result logged using `%s` driver" % logging_driver
+
+ if status != 0:
+ self.fail(output)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container %s (v=%s, link=%s, force=%s)" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode and callable(getattr(self.client, 'update_container')):
+ try:
+ result = self.client.update_container(container_id, **update_parameters)
+ self.client.report_warnings(result)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, to_native(exc)))
+ return response
+
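+ # When no stop_timeout is given, restart/stop below let the docker daemon
+ # apply its own default grace period (10 seconds for current daemons).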
%s: %s" % (container_id, exc)) + return response + + def container_restart(self, container_id): + self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout)) + self.results['changed'] = True + if not self.check_mode: + try: + if self.parameters.stop_timeout: + dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout) + else: + dummy = self.client.restart(container_id) + except Exception as exc: + self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_stop(self, container_id): + if self.parameters.force_kill: + self.container_kill(container_id) + return + self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) + self.results['changed'] = True + response = None + if not self.check_mode: + count = 0 + while True: + try: + if self.parameters.stop_timeout: + response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) + else: + response = self.client.stop(container_id) + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. + if count == 3: + self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) + count += 1 + # Unpause + try: + self.client.unpause(container=container_id) + except Exception as exc2: + self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) + # Now try again + continue + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + # We only loop when explicitly requested by 'continue' + break + return response + + +def detect_ipvX_address_usage(client): + ''' + Helper function to detect whether any specified network uses ipv4_address or ipv6_address + ''' + for network in client.module.params.get("networks") or []: + if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None: + return True + return False + + +class AnsibleDockerClientContainer(AnsibleDockerClient): + # A list of module options which are not docker container properties + __NON_CONTAINER_PROPERTY_OPTIONS = tuple([ + 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks', + 'recreate', 'restart', 'state', 'networks', 'cleanup', 'kill_signal', + 'output_logs', 'paused', 'removal_wait_timeout', 'default_host_ip', + ] + list(DOCKER_COMMON_ARGS.keys())) + + def _parse_comparisons(self): + comparisons = {} + comp_aliases = {} + # Put in defaults + explicit_types = dict( + command='list', + devices='set(dict)', + device_requests='set(dict)', + dns_search_domains='list', + dns_servers='list', + env='set', + entrypoint='list', + etc_hosts='set', + mounts='set(dict)', + networks='set(dict)', + ulimits='set(dict)', + device_read_bps='set(dict)', + device_write_bps='set(dict)', + device_read_iops='set(dict)', + device_write_iops='set(dict)', + ) + all_options = set() # this is for improving user feedback when a wrong option was specified for comparison + default_values = dict( + stop_timeout='ignore', + ) + for option, data in self.module.argument_spec.items(): + all_options.add(option) + for alias in data.get('aliases', []): + all_options.add(alias) + # Ignore 
+ if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
+ continue
+ # Determine option type
+ if option in explicit_types:
+ datatype = explicit_types[option]
+ elif data['type'] == 'list':
+ datatype = 'set'
+ elif data['type'] == 'dict':
+ datatype = 'dict'
+ else:
+ datatype = 'value'
+ # Determine comparison type
+ if option in default_values:
+ comparison = default_values[option]
+ elif datatype in ('list', 'value'):
+ comparison = 'strict'
+ else:
+ comparison = 'allow_more_present'
+ comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
+ # Keep track of aliases
+ comp_aliases[option] = option
+ for alias in data.get('aliases', []):
+ comp_aliases[alias] = option
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ comparisons['image']['comparison'] = 'ignore'
+ if self.module.params['purge_networks']:
+ comparisons['networks']['comparison'] = 'strict'
+ # Process options
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option, v in comparisons.items():
+ if option == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ v['comparison'] = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+ if key in all_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to the container's state!" % key)
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ comparisons[key_main]['comparison'] = value
+ elif value == 'allow_more_present':
+ if comparisons[key_main]['type'] == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ comparisons[key_main]['comparison'] = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
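+ # Example: comparisons={'*': 'ignore', 'env': 'strict', 'networks': 'allow_more_present'}
+ # applies the wildcard first, then the per-option overrides; aliases (e.g. 'log_opt'
+ # for 'log_options') resolve to their main option name via comp_aliases.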
+ # Add implicit options
+ comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
+ comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
+ comparisons['disable_healthcheck'] = dict(type='value',
+ comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
+ name='disable_healthcheck')
+ # Check legacy values
+ if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+ self.comparisons = comparisons
+
+ def _get_additional_minimal_versions(self):
+ stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
+ stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
+ if stop_timeout_supported:
+ stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
+ "the container's stop_timeout configuration. "
+ "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
+ else:
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
+ "update the container's stop_timeout configuration."
% (self.docker_api_version_str,)) + self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported + + def __init__(self, **kwargs): + option_minimal_versions = dict( + # internal options + log_config=dict(), + publish_all_ports=dict(), + ports=dict(), + volume_binds=dict(), + name=dict(), + # normal options + device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'), + dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'), + ipc_mode=dict(docker_api_version='1.25'), + mac_address=dict(docker_api_version='1.25'), + oom_score_adj=dict(docker_api_version='1.22'), + shm_size=dict(docker_api_version='1.22'), + stop_signal=dict(docker_api_version='1.21'), + tmpfs=dict(docker_api_version='1.22'), + volume_driver=dict(docker_api_version='1.21'), + memory_reservation=dict(docker_api_version='1.21'), + kernel_memory=dict(docker_api_version='1.21'), + auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'), + healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'), + init=dict(docker_py_version='2.2.0', docker_api_version='1.25'), + runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'), + sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'), + userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'), + uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'), + pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'), + mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'), + # specials + ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22', + detect_usage=detect_ipvX_address_usage, + usage_msg='ipv4_address or ipv6_address in networks'), + stop_timeout=dict(), # see _get_additional_minimal_versions() + ) + + super(AnsibleDockerClientContainer, self).__init__( + option_minimal_versions=option_minimal_versions, + option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS, + **kwargs + ) + + self.image_inspect_source = 'Config' + if self.docker_api_version < LooseVersion('1.21'): + self.image_inspect_source = 'ContainerConfig' + + self._get_additional_minimal_versions() + self._parse_comparisons() + + if self.module.params['container_default_behavior'] is None: + self.module.params['container_default_behavior'] = 'compatibility' + self.module.deprecate( + 'The container_default_behavior option will change its default value from "compatibility" to ' + '"no_defaults" in community.docker 2.0.0. 
To remove this warning, please specify an explicit value for it now', + version='2.0.0', collection_name='community.docker' # was Ansible 2.14 / community.general 3.0.0 + ) + if self.module.params['container_default_behavior'] == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory="0", + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value + + +def main(): + argument_spec = dict( + auto_remove=dict(type='bool'), + blkio_weight=dict(type='int'), + capabilities=dict(type='list', elements='str'), + cap_drop=dict(type='list', elements='str'), + cgroup_parent=dict(type='str'), + cleanup=dict(type='bool', default=False), + command=dict(type='raw'), + comparisons=dict(type='dict'), + container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']), + cpu_period=dict(type='int'), + cpu_quota=dict(type='int'), + cpus=dict(type='float'), + cpuset_cpus=dict(type='str'), + cpuset_mems=dict(type='str'), + cpu_shares=dict(type='int'), + default_host_ip=dict(type='str'), + detach=dict(type='bool'), + devices=dict(type='list', elements='str'), + device_read_bps=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )), + device_write_bps=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )), + device_read_iops=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )), + device_write_iops=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )), + device_requests=dict(type='list', elements='dict', options=dict( + capabilities=dict(type='list', elements='list'), + count=dict(type='int'), + device_ids=dict(type='list', elements='str'), + driver=dict(type='str'), + options=dict(type='dict'), + )), + dns_servers=dict(type='list', elements='str'), + dns_opts=dict(type='list', elements='str'), + dns_search_domains=dict(type='list', elements='str'), + domainname=dict(type='str'), + entrypoint=dict(type='list', elements='str'), + env=dict(type='dict'), + env_file=dict(type='path'), + etc_hosts=dict(type='dict'), + exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + groups=dict(type='list', elements='str'), + healthcheck=dict(type='dict', options=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )), + hostname=dict(type='str'), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + init=dict(type='bool'), + interactive=dict(type='bool'), + ipc_mode=dict(type='str'), + keep_volumes=dict(type='bool', default=True), + kernel_memory=dict(type='str'), + kill_signal=dict(type='str'), + labels=dict(type='dict'), + links=dict(type='list', elements='str'), + log_driver=dict(type='str'), + log_options=dict(type='dict', aliases=['log_opt']), + mac_address=dict(type='str'), + memory=dict(type='str'), + memory_reservation=dict(type='str'), + memory_swap=dict(type='str'), + memory_swappiness=dict(type='int'), + mounts=dict(type='list', elements='dict', options=dict( + 
target=dict(type='str', required=True), + source=dict(type='str'), + type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), + read_only=dict(type='bool'), + consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), + propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), + no_copy=dict(type='bool'), + labels=dict(type='dict'), + volume_driver=dict(type='str'), + volume_options=dict(type='dict'), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='str'), + )), + name=dict(type='str', required=True), + network_mode=dict(type='str'), + networks=dict(type='list', elements='dict', options=dict( + name=dict(type='str', required=True), + ipv4_address=dict(type='str'), + ipv6_address=dict(type='str'), + aliases=dict(type='list', elements='str'), + links=dict(type='list', elements='str'), + )), + networks_cli_compatible=dict(type='bool', default=True), + oom_killer=dict(type='bool'), + oom_score_adj=dict(type='int'), + output_logs=dict(type='bool', default=False), + paused=dict(type='bool'), + pid_mode=dict(type='str'), + pids_limit=dict(type='int'), + privileged=dict(type='bool'), + published_ports=dict(type='list', elements='str', aliases=['ports']), + pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), + read_only=dict(type='bool'), + recreate=dict(type='bool', default=False), + removal_wait_timeout=dict(type='float'), + restart=dict(type='bool', default=False), + restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), + restart_retries=dict(type='int'), + runtime=dict(type='str'), + security_opts=dict(type='list', elements='str'), + shm_size=dict(type='str'), + state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), + stop_signal=dict(type='str'), + stop_timeout=dict(type='int'), + sysctls=dict(type='dict'), + tmpfs=dict(type='list', elements='str'), + tty=dict(type='bool'), + ulimits=dict(type='list', elements='str'), + user=dict(type='str'), + userns_mode=dict(type='str'), + uts=dict(type='str'), + volume_driver=dict(type='str'), + volumes=dict(type='list', elements='str'), + volumes_from=dict(type='list', elements='str'), + working_dir=dict(type='str'), + ) + + required_if = [ + ('state', 'present', ['image']) + ] + + client = AnsibleDockerClientContainer( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + min_docker_api_version='1.20', + ) + if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None: + client.module.deprecate( + 'Please note that the default value for `network_mode` will change from not specified ' + '(which is equal to `default`) to the name of the first network in `networks` if ' + '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can ' + 'change the behavior now by explicitly setting `network_mode` to the name of the first ' + 'network in `networks`, and remove this warning by setting `network_mode` to `default`. ' + 'Please make sure that the value you set to `network_mode` equals the inspection result ' + 'for existing containers, otherwise the module will recreate them. 
You can find out the '
+            'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
+            version='2.0.0', collection_name='community.docker',  # was Ansible 2.14 / community.general 3.0.0
+        )
+
+    try:
+        cm = ContainerManager(client)
+        client.module.exit_json(**sanitize_result(cm.results))
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py
new file mode 100644
index 00000000..92a73525
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+  - Retrieves facts about a docker container.
+  - Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container)
+    returns for a non-absent container.
+
+
+options:
+  name:
+    description:
+      - The name of the container to inspect.
+      - When identifying an existing container, the name can be a container name or a long or short container ID.
+    type: str
+    required: yes
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+  - "Felix Fontein (@felixfontein)"
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Get info on container
+  community.docker.docker_container_info:
+    name: mydata
+  register: result
+
+- name: Does container exist?
+  ansible.builtin.debug:
+    msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+  ansible.builtin.debug:
+    var: result.container
+  when: result.exists
+'''
+
+RETURN = '''
+exists:
+  description:
+    - Returns whether the container exists.
+  type: bool
+  returned: always
+  sample: true
+container:
+  description:
+    - Facts representing the current state of the container. Matches the docker inspection output.
+    - Will be C(none) if the container does not exist.
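+    - Use the I(exists) return value if you only need to check for existence without evaluating this dictionary.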
+  returned: always
+  type: dict
+  sample: '{
+    "AppArmorProfile": "",
+    "Args": [],
+    "Config": {
+        "AttachStderr": false,
+        "AttachStdin": false,
+        "AttachStdout": false,
+        "Cmd": [
+            "/usr/bin/supervisord"
+        ],
+        "Domainname": "",
+        "Entrypoint": null,
+        "Env": [
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+        ],
+        "ExposedPorts": {
+            "443/tcp": {},
+            "80/tcp": {}
+        },
+        "Hostname": "8e47bf643eb9",
+        "Image": "lnmp_nginx:v1",
+        "Labels": {},
+        "OnBuild": null,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Tty": false,
+        "User": "",
+        "Volumes": {
+            "/tmp/lnmp/nginx-sites/logs/": {}
+        },
+        ...
+    }'
+'''
+
+import traceback
+
+try:
+    from docker.errors import DockerException
+except ImportError:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    AnsibleDockerClient,
+    RequestException,
+)
+
+
+def main():
+    argument_spec = dict(
+        name=dict(type='str', required=True),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_api_version='1.20',
+    )
+
+    try:
+        container = client.get_container(client.module.params['name'])
+
+        client.module.exit_json(
+            changed=False,
+            exists=bool(container),
+            container=container,
+        )
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py
new file mode 100644
index 00000000..9007221a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about the docker host and lists of objects of its services
+
+description:
+  - Retrieves facts about a docker host.
+  - Essentially returns the output of C(docker system info).
+  - The module also allows listing object names for containers, images, networks and volumes,
+    as well as querying disk usage information.
+  - The output differs depending on the API version of the docker daemon.
+  - If the docker daemon cannot be contacted or does not meet the API version requirements,
+    the module will fail.
+
+
+options:
+  containers:
+    description:
+      - Whether to list containers.
+    type: bool
+    default: no
+  containers_filters:
+    description:
+      - A dictionary of filter values used for selecting containers to list.
+      - "For example, C(until: 24h)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  images:
+    description:
+      - Whether to list images.
+    type: bool
+    default: no
+  images_filters:
+    description:
+      - A dictionary of filter values used for selecting images to list.
+      - "For example, C(dangling: true)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  networks:
+    description:
+      - Whether to list networks.
+    type: bool
+    default: no
+  networks_filters:
+    description:
+      - A dictionary of filter values used for selecting networks to list.
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  volumes:
+    description:
+      - Whether to list volumes.
+    type: bool
+    default: no
+  volumes_filters:
+    description:
+      - A dictionary of filter values used for selecting volumes to list.
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  disk_usage:
+    description:
+      - Summary information on disk space used by all Docker layers.
+      - The output is a sum of images, volumes, containers and build cache.
+    type: bool
+    default: no
+  verbose_output:
+    description:
+      - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
+        the output will contain verbose information about the matching objects, corresponding to the full output of
+        the respective API method. For details see the documentation of your version of the Docker API
+        at L(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the corresponding
+        I(_info) module for each type of object.
+    type: bool
+    default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+  - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+  community.docker.docker_host_info:
+  register: result
+
+- name: Get info on docker host and list images
+  community.docker.docker_host_info:
+    images: yes
+  register: result
+
+- name: Get info on docker host and list images matching the filter
+  community.docker.docker_host_info:
+    images: yes
+    images_filters:
+      label: "mylabel"
+  register: result
+
+- name: Get info on docker host and verbose list images
+  community.docker.docker_host_info:
+    images: yes
+    verbose_output: yes
+  register: result
+
+- name: Get info on docker host and used disk space
+  community.docker.docker_host_info:
+    disk_usage: yes
+  register: result
+
+- ansible.builtin.debug:
+    var: result.host_info
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon.
+  returned: both on success and on error
+  type: bool
+
+host_info:
+  description:
+    - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+  returned: always
+  type: dict
+volumes:
+  description:
+    - List of dict objects containing the basic information about each volume.
+      Keys match the C(docker volume ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(volumes) is C(yes)
+  type: list
+  elements: dict
+networks:
+  description:
+    - List of dict objects containing the basic information about each network.
+      Keys match the C(docker network ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(networks) is C(yes)
+  type: list
+  elements: dict
+containers:
+  description:
+    - List of dict objects containing the basic information about each container.
+      Keys match the C(docker container ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(containers) is C(yes)
+  type: list
+  elements: dict
+images:
+  description:
+    - List of dict objects containing the basic information about each image.
+      Keys match the C(docker image ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(images) is C(yes)
+  type: list
+  elements: dict
+disk_usage:
+  description:
+    - Information on summary disk usage by images, containers and volumes on the docker host
+      unless I(verbose_output=yes). See description for I(verbose_output).
+  returned: When I(disk_usage) is C(yes)
+  type: dict
+
+'''
+
+import traceback
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    AnsibleDockerClient,
+    DockerBaseClass,
+    RequestException,
+)
+from ansible.module_utils._text import to_native
+
+try:
+    from docker.errors import DockerException, APIError
+except ImportError:
+    # Missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import clean_dict_booleans_for_docker_api
+
+
+class DockerHostManager(DockerBaseClass):
+
+    def __init__(self, client, results):
+
+        super(DockerHostManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        self.verbose_output = self.client.module.params['verbose_output']
+
+        listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+        self.results['host_info'] = self.get_docker_host_info()
+
+        if self.client.module.params['disk_usage']:
+            self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+        for docker_object in listed_objects:
+            if self.client.module.params[docker_object]:
+                returned_name = docker_object
+                filter_name = docker_object + "_filters"
+                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+    def get_docker_host_info(self):
+        try:
+            return self.client.info()
+        except APIError as exc:
+            self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+    def get_docker_disk_usage_facts(self):
+        try:
+            if self.verbose_output:
+                return self.client.df()
+            else:
+                return dict(LayersSize=self.client.df()['LayersSize'])
+        except APIError as exc:
+            self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+    def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
+        items = None
+        items_list = []
+
+        header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+        header_volumes = ['Driver', 'Name']
+        header_images = ['Id', 'RepoTags', 'Created', 'Size']
+        header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+        filter_arg = dict()
+        if filters:
+            filter_arg['filters'] = filters
+        try:
+            if docker_object == 'containers':
+                items = self.client.containers(**filter_arg)
+            elif docker_object == 'networks':
+                items = self.client.networks(**filter_arg)
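+            # Note: these are the low-level Docker SDK (APIClient) list calls; they
+            # return raw API dictionaries, roughly matching the output of the
+            # corresponding `docker ... ls` CLI commands. Unlike the other calls,
+            # volumes() wraps its result in a dict, so the actual list lives under
+            # the 'Volumes' key (handled further below).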
+            elif docker_object == 'images':
+                items = self.client.images(**filter_arg)
+            elif docker_object == 'volumes':
+                items = self.client.volumes(**filter_arg)
+        except APIError as exc:
+            self.client.fail("Error inspecting docker host for object '%s': %s" %
+                             (docker_object, to_native(exc)))
+
+        if self.verbose_output:
+            if docker_object != 'volumes':
+                return items
+            else:
+                return items['Volumes']
+
+        if docker_object == 'volumes':
+            items = items['Volumes']
+
+        for item in items:
+            item_record = dict()
+
+            if docker_object == 'containers':
+                for key in header_containers:
+                    item_record[key] = item.get(key)
+            elif docker_object == 'networks':
+                for key in header_networks:
+                    item_record[key] = item.get(key)
+            elif docker_object == 'images':
+                for key in header_images:
+                    item_record[key] = item.get(key)
+            elif docker_object == 'volumes':
+                for key in header_volumes:
+                    item_record[key] = item.get(key)
+            items_list.append(item_record)
+
+        return items_list
+
+
+def main():
+    argument_spec = dict(
+        containers=dict(type='bool', default=False),
+        containers_filters=dict(type='dict'),
+        images=dict(type='bool', default=False),
+        images_filters=dict(type='dict'),
+        networks=dict(type='bool', default=False),
+        networks_filters=dict(type='dict'),
+        volumes=dict(type='bool', default=False),
+        volumes_filters=dict(type='dict'),
+        disk_usage=dict(type='bool', default=False),
+        verbose_output=dict(type='bool', default=False),
+    )
+
+    option_minimal_versions = dict(
+        networks_filters=dict(docker_py_version='2.0.2'),
+        disk_usage=dict(docker_py_version='2.2.0'),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='1.10.0',
+        min_docker_api_version='1.21',
+        option_minimal_versions=option_minimal_versions,
+        fail_results=dict(
+            can_talk_to_docker=False,
+        ),
+    )
+    client.fail_results['can_talk_to_docker'] = True
+
+    try:
+        results = dict(
+            changed=False,
+        )
+
+        DockerHostManager(client, results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py
new file mode 100644
index 00000000..07728813
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py
@@ -0,0 +1,854 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images
+
+
+description:
+  - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+    image into a repository and archiving an image to a C(.tar) file.
+
+options:
+  source:
+    description:
+      - "Determines where the module will try to retrieve the image from."
+      - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+         be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must + be specified when this value is used." + - "Use C(pull) to pull the image from a registry." + - "Use C(local) to make sure that the image is already available on the local + docker daemon, i.e. do not try to build, pull or load the image." + type: str + choices: + - build + - load + - pull + - local + build: + description: + - "Specifies options used for building images." + type: dict + suboptions: + cache_from: + description: + - List of image names to consider as cache source. + type: list + elements: str + dockerfile: + description: + - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image. + - This can also include a relative path (relative to I(path)). + type: str + http_timeout: + description: + - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of + seconds. + type: int + path: + description: + - Use with state 'present' to build an image. Will be the path to a directory containing the context and + Dockerfile for building an image. + type: path + required: yes + pull: + description: + - When building an image downloads any updates to the FROM image in Dockerfile. + type: bool + default: no + rm: + description: + - Remove intermediate containers after build. + type: bool + default: yes + network: + description: + - The network to use for C(RUN) build instructions. + type: str + nocache: + description: + - Do not use cache when building an image. + type: bool + default: no + etc_hosts: + description: + - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address. + type: dict + args: + description: + - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive. + - Docker expects the value to be a string. For convenience any non-string values will be converted to strings. + - Requires Docker API >= 1.21. + type: dict + container_limits: + description: + - A dictionary of limits applied to each container created by the build process. + type: dict + suboptions: + memory: + description: + - Set memory limit for build. + type: int + memswap: + description: + - Total memory (memory + swap), -1 to disable swap. + type: int + cpushares: + description: + - CPU shares (relative weight). + type: int + cpusetcpus: + description: + - CPUs in which to allow execution, e.g., "0-3", "0,1". + type: str + use_config_proxy: + description: + - If set to C(yes) and a proxy configuration is specified in the docker client configuration + (by default C($HOME/.docker/config.json)), the corresponding environment variables will + be set in the container being built. + - Needs Docker SDK for Python >= 3.7.0. + type: bool + target: + description: + - When building an image specifies an intermediate build stage by + name as a final stage for the resulting image. + type: str + platform: + description: + - Platform in the format C(os[/arch[/variant]]). + type: str + version_added: 1.1.0 + archive_path: + description: + - Use with state C(present) to archive an image to a .tar file. + type: path + load_path: + description: + - Use with state C(present) to load an image from a .tar file. + - Set I(source) to C(load) if you want to load the image. + type: path + force_source: + description: + - Use with state C(present) to build, load or pull an image (depending on the + value of the I(source) option) when the image already exists. 
+ type: bool + default: false + force_absent: + description: + - Use with state I(absent) to un-tag and remove all images matching the specified name. + type: bool + default: false + force_tag: + description: + - Use with state C(present) to force tagging an image. + type: bool + default: false + name: + description: + - "Image name. Name format will be one of: name, repository/name, registry_server:port/name. + When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'." + - Note that image IDs (hashes) are not supported. + type: str + required: yes + push: + description: + - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter. + type: bool + default: no + repository: + description: + - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects + format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest). + type: str + state: + description: + - Make assertions about the state of an image. + - When C(absent) an image will be removed. Use the force option to un-tag and remove all images + matching the provided name. + - When C(present) check if an image exists using the provided name and tag. If the image is not found or the + force option is used, the image will either be pulled, built or loaded, depending on the I(source) option. + type: str + default: present + choices: + - absent + - present + tag: + description: + - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to + I(latest). + - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence. + type: str + default: latest + +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.20" + +author: + - Pavel Antonov (@softzilla) + - Chris Houseknecht (@chouseknecht) + - Sorin Sbarnea (@ssbarnea) + +''' + +EXAMPLES = ''' + +- name: Pull an image + community.docker.docker_image: + name: pacur/centos-7 + source: pull + +- name: Tag and push to docker hub + community.docker.docker_image: + name: pacur/centos-7:56 + repository: dcoppenhagan/myimage:7.56 + push: yes + source: local + +- name: Tag and push to local registry + community.docker.docker_image: + # Image will be centos:7 + name: centos + # Will be pushed to localhost:5000/centos:7 + repository: localhost:5000/centos + tag: 7 + push: yes + source: local + +- name: Add tag latest to image + community.docker.docker_image: + name: myimage:7.1.2 + repository: myimage:latest + # As 'latest' usually already is present, we need to enable overwriting of existing tags: + force_tag: yes + source: local + +- name: Remove image + community.docker.docker_image: + state: absent + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + +- name: Build an image and push it to a private repo + community.docker.docker_image: + build: + path: ./sinatra + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + push: yes + source: build + +- name: Archive image + community.docker.docker_image: + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + archive_path: my_sinatra.tar + source: local + +- name: Load image from archive and push to a private registry + 
community.docker.docker_image:
+    name: localhost:5000/myimages/sinatra
+    tag: v1
+    push: yes
+    load_path: my_sinatra.tar
+    source: load
+
+- name: Build image with build args
+  community.docker.docker_image:
+    name: myimage
+    build:
+      path: /path/to/build/dir
+      args:
+        log_volume: /var/log/myapp
+        listen_port: 8080
+    source: build
+
+- name: Build image using cache source
+  community.docker.docker_image:
+    name: myimage:latest
+    build:
+      path: /path/to/build/dir
+      # Use as cache source for building myimage
+      cache_from:
+        - nginx:latest
+        - alpine:3.8
+    source: build
+'''
+
+RETURN = '''
+image:
+  description: Image inspection results for the affected image.
+  returned: success
+  type: dict
+  sample: {}
+stdout:
+  description: Docker build output when building an image.
+  returned: success
+  type: str
+  sample: ""
+  version_added: 1.0.0
+'''
+
+import errno
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    clean_dict_booleans_for_docker_api,
+    docker_version,
+    AnsibleDockerClient,
+    DockerBaseClass,
+    is_image_name_id,
+    is_valid_tag,
+    RequestException,
+)
+from ansible.module_utils._text import to_native
+
+if docker_version is not None:
+    try:
+        if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+            from docker.auth import resolve_repository_name
+        else:
+            from docker.auth.auth import resolve_repository_name
+        from docker.utils.utils import parse_repository_tag
+        from docker.errors import DockerException
+    except ImportError:
+        # missing Docker SDK for Python handled in module_utils.docker.common
+        pass
+
+
+class ImageManager(DockerBaseClass):
+
+    def __init__(self, client, results):
+
+        super(ImageManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        parameters = self.client.module.params
+        self.check_mode = self.client.check_mode
+
+        self.source = parameters['source']
+        build = parameters['build'] or dict()
+        self.archive_path = parameters.get('archive_path')
+        self.cache_from = build.get('cache_from')
+        self.container_limits = build.get('container_limits')
+        self.dockerfile = build.get('dockerfile')
+        self.force_source = parameters.get('force_source')
+        self.force_absent = parameters.get('force_absent')
+        self.force_tag = parameters.get('force_tag')
+        self.load_path = parameters.get('load_path')
+        self.name = parameters.get('name')
+        self.network = build.get('network')
+        self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+        self.nocache = build.get('nocache', False)
+        self.build_path = build.get('path')
+        self.pull = build.get('pull')
+        self.target = build.get('target')
+        self.repository = parameters.get('repository')
+        self.rm = build.get('rm', True)
+        self.state = parameters.get('state')
+        self.tag = parameters.get('tag')
+        self.http_timeout = build.get('http_timeout')
+        self.push = parameters.get('push')
+        self.buildargs = build.get('args')
+        self.build_platform = build.get('platform')
+        self.use_config_proxy = build.get('use_config_proxy')
+
+        # If the name contains a tag, it takes precedence over the tag parameter.
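+        # Illustrative examples (assuming the Docker SDK's parse_repository_tag
+        # behaves as documented; the colon only counts as a tag separator when
+        # no slash follows it):
+        #   parse_repository_tag('nginx')            -> ('nginx', None)
+        #   parse_repository_tag('nginx:1.19')       -> ('nginx', '1.19')
+        #   parse_repository_tag('reg:5000/nginx')   -> ('reg:5000/nginx', None)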
+ if not is_image_name_id(self.name): + repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + if self.state == 'present': + self.present() + elif self.state == 'absent': + self.absent() + + def fail(self, msg): + self.client.fail(msg) + + def present(self): + ''' + Handles state = 'present', which includes building, loading or pulling an image, + depending on user provided parameters. + + :returns None + ''' + image = self.client.find_image(name=self.name, tag=self.tag) + + if not image or self.force_source: + if self.source == 'build': + # Build the image + if not os.path.isdir(self.build_path): + self.fail("Requested build path %s could not be found or you do not have access." % self.build_path) + image_name = self.name + if self.tag: + image_name = "%s:%s" % (self.name, self.tag) + self.log("Building image %s" % image_name) + self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path)) + self.results['changed'] = True + if not self.check_mode: + self.results.update(self.build_image()) + + elif self.source == 'load': + # Load the image from an archive + if not os.path.isfile(self.load_path): + self.fail("Error loading image %s. Specified path %s does not exist." % (self.name, + self.load_path)) + image_name = self.name + if self.tag: + image_name = "%s:%s" % (self.name, self.tag) + self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'] = self.load_image() + elif self.source == 'pull': + # pull the image + self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag) + elif self.source == 'local': + if image is None: + name = self.name + if self.tag: + name = "%s:%s" % (self.name, self.tag) + self.client.fail('Cannot find the image %s locally.' % name) + if not self.check_mode and image and image['Id'] == self.results['image']['Id']: + self.results['changed'] = False + + if self.archive_path: + self.archive_image(self.name, self.tag) + + if self.push and not self.repository: + self.push_image(self.name, self.tag) + elif self.repository: + self.tag_image(self.name, self.tag, self.repository, push=self.push) + + def absent(self): + ''' + Handles state = 'absent', which removes an image. + + :return None + ''' + name = self.name + if is_image_name_id(name): + image = self.client.find_image_by_id(name) + else: + image = self.client.find_image(name, self.tag) + if self.tag: + name = "%s:%s" % (self.name, self.tag) + if image: + if not self.check_mode: + try: + self.client.remove_image(name, force=self.force_absent) + except Exception as exc: + self.fail("Error removing image %s - %s" % (name, str(exc))) + + self.results['changed'] = True + self.results['actions'].append("Removed image %s" % (name)) + self.results['image']['state'] = 'Deleted' + + def archive_image(self, name, tag): + ''' + Archive an image to a .tar file. Called when archive_path is passed. + + :param name - name of the image. 
Type: str + :return None + ''' + + if not tag: + tag = "latest" + + image = self.client.find_image(name=name, tag=tag) + if not image: + self.log("archive image: image %s:%s not found" % (name, tag)) + return + + image_name = "%s:%s" % (name, tag) + self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path)) + self.results['changed'] = True + if not self.check_mode: + self.log("Getting archive of image %s" % image_name) + try: + image = self.client.get_image(image_name) + except Exception as exc: + self.fail("Error getting image %s - %s" % (image_name, str(exc))) + + try: + with open(self.archive_path, 'wb') as fd: + if self.client.docker_py_version >= LooseVersion('3.0.0'): + for chunk in image: + fd.write(chunk) + else: + for chunk in image.stream(2048, decode_content=False): + fd.write(chunk) + except Exception as exc: + self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc))) + + image = self.client.find_image(name=name, tag=tag) + if image: + self.results['image'] = image + + def push_image(self, name, tag=None): + ''' + If the name of the image contains a repository path, then push the image. + + :param name Name of the image to push. + :param tag Use a specific tag. + :return: None + ''' + + repository = name + if not tag: + repository, tag = parse_repository_tag(name) + registry, repo_name = resolve_repository_name(repository) + + self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + + if registry: + self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + self.results['changed'] = True + if not self.check_mode: + status = None + try: + changed = False + for line in self.client.push(repository, tag=tag, stream=True, decode=True): + self.log(line, pretty_print=True) + if line.get('errorDetail'): + raise Exception(line['errorDetail']['message']) + status = line.get('status') + if status == 'Pushing': + changed = True + self.results['changed'] = changed + except Exception as exc: + if re.search('unauthorized', str(exc)): + if re.search('authentication required', str(exc)): + self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % + (registry, repo_name, tag, str(exc), registry)) + else: + self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" % + (registry, repo_name, tag, str(exc))) + self.fail("Error pushing image %s: %s" % (repository, str(exc))) + self.results['image'] = self.client.find_image(name=repository, tag=tag) + if not self.results['image']: + self.results['image'] = dict() + self.results['image']['push_status'] = status + + def tag_image(self, name, tag, repository, push=False): + ''' + Tag an image into a repository. + + :param name: name of the image. required. + :param tag: image tag. + :param repository: path to the repository. required. + :param push: bool. push the image once it's tagged. 
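+        Illustrative example (derived from the logic below): with name='myimage',
+        tag='1.0' and repository='registry.local/myimage', the resulting tag is
+        'registry.local/myimage:1.0'; an explicit tag overrides a tag embedded in
+        the repository value, and 'latest' is used when neither provides one.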
+ :return: None + ''' + repo, repo_tag = parse_repository_tag(repository) + if not repo_tag: + repo_tag = "latest" + if tag: + repo_tag = tag + image = self.client.find_image(name=repo, tag=repo_tag) + found = 'found' if image else 'not found' + self.log("image %s was %s" % (repo, found)) + + if not image or self.force_tag: + self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag)) + self.results['changed'] = True + self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag)) + if not self.check_mode: + try: + # Finding the image does not always work, especially running a localhost registry. In those + # cases, if we don't set force=True, it errors. + image_name = name + if tag and not re.search(tag, name): + image_name = "%s:%s" % (name, tag) + tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True) + if not tag_status: + raise Exception("Tag operation failed.") + except Exception as exc: + self.fail("Error: failed to tag image - %s" % str(exc)) + self.results['image'] = self.client.find_image(name=repo, tag=repo_tag) + if image and image['Id'] == self.results['image']['Id']: + self.results['changed'] = False + + if push: + self.push_image(repo, repo_tag) + + @staticmethod + def _extract_output_line(line, output): + ''' + Extract text line from stream output and, if found, adds it to output. + ''' + if 'stream' in line or 'status' in line: + # Make sure we have a string (assuming that line['stream'] and + # line['status'] are either not defined, falsish, or a string) + text_line = line.get('stream') or line.get('status') or '' + output.append(text_line) + + def build_image(self): + ''' + Build an image + + :return: image dict + ''' + params = dict( + path=self.build_path, + tag=self.name, + rm=self.rm, + nocache=self.nocache, + timeout=self.http_timeout, + pull=self.pull, + forcerm=self.rm, + dockerfile=self.dockerfile, + decode=True, + ) + if self.client.docker_py_version < LooseVersion('3.0.0'): + params['stream'] = True + + if self.tag: + params['tag'] = "%s:%s" % (self.name, self.tag) + if self.container_limits: + params['container_limits'] = self.container_limits + if self.buildargs: + for key, value in self.buildargs.items(): + self.buildargs[key] = to_native(value) + params['buildargs'] = self.buildargs + if self.cache_from: + params['cache_from'] = self.cache_from + if self.network: + params['network_mode'] = self.network + if self.extra_hosts: + params['extra_hosts'] = self.extra_hosts + if self.use_config_proxy: + params['use_config_proxy'] = self.use_config_proxy + # Due to a bug in docker-py, it will crash if + # use_config_proxy is True and buildargs is None + if 'buildargs' not in params: + params['buildargs'] = {} + if self.target: + params['target'] = self.target + if self.build_platform is not None: + params['platform'] = self.build_platform + + build_output = [] + for line in self.client.build(**params): + # line = json.loads(line) + self.log(line, pretty_print=True) + self._extract_output_line(line, build_output) + + if line.get('error'): + if line.get('errorDetail'): + errorDetail = line.get('errorDetail') + self.fail( + "Error building %s - code: %s, message: %s, logs: %s" % ( + self.name, + errorDetail.get('code'), + errorDetail.get('message'), + build_output)) + else: + self.fail("Error building %s - message: %s, logs: %s" % ( + self.name, line.get('error'), build_output)) + + return {"stdout": "\n".join(build_output), + "image": self.client.find_image(name=self.name, tag=self.tag)} + + def load_image(self): + 
'''
+        Load an image from a .tar archive
+
+        :return: image dict
+        '''
+        # Load image(s) from file
+        load_output = []
+        has_output = False
+        try:
+            self.log("Opening image %s" % self.load_path)
+            with open(self.load_path, 'rb') as image_tar:
+                self.log("Loading image from %s" % self.load_path)
+                output = self.client.load_image(image_tar)
+                if output is not None:
+                    # Old versions of Docker SDK for Python (before version 2.5.0) do not return anything.
+                    # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
+                    # Note that before that commit, something else than None was returned, but that was also
+                    # only introduced in a commit that first appeared in 2.5.0 (see
+                    # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
+                    # So the above check works for every released version of Docker SDK for Python.
+                    has_output = True
+                    for line in output:
+                        self.log(line, pretty_print=True)
+                        self._extract_output_line(line, load_output)
+                else:
+                    if LooseVersion(docker_version) < LooseVersion('2.5.0'):
+                        self.client.module.warn(
+                            'The installed version of the Docker SDK for Python does not return the loading results'
+                            ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
+                            ' loaded, whether multiple images were loaded, or whether the load actually succeeded.'
+                            ' If you are not stuck with Python 2.6, *please* upgrade to version 2.5.0 or newer'
+                            ' (2.5.0 was released in August 2017).'
+                        )
+                    else:
+                        self.client.module.warn(
+                            'The API version of your Docker daemon is < 1.23, which does not return the image'
+                            ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+                            ' expected image was loaded, whether multiple images were loaded, or whether the load'
+                            ' actually succeeded. You should consider upgrading your Docker daemon.'
+                        )
+        except EnvironmentError as exc:
+            if exc.errno == errno.ENOENT:
+                self.client.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
+            self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+        except Exception as exc:
+            self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+
+        # Collect loaded images
+        if has_output:
+            # We can only do this when we actually got some output from the Docker daemon
+            loaded_images = set()
+            for line in load_output:
+                if line.startswith('Loaded image:'):
+                    loaded_images.add(line[len('Loaded image:'):].strip())
+
+            if not loaded_images:
+                self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+            expected_image = '%s:%s' % (self.name, self.tag)
+            if expected_image not in loaded_images:
+                self.client.fail(
+                    "The archive did not contain image '%s'. Instead, found %s."
% ( + expected_image, ', '.join(["'%s'" % image for image in sorted(loaded_images)])), + stdout='\n'.join(load_output)) + loaded_images.remove(expected_image) + + if loaded_images: + self.client.module.warn( + "The archive contained more images than specified: %s" % ( + ', '.join(["'%s'" % image for image in sorted(loaded_images)]), )) + + return self.client.find_image(self.name, self.tag) + + +def main(): + argument_spec = dict( + source=dict(type='str', choices=['build', 'load', 'pull', 'local']), + build=dict(type='dict', options=dict( + cache_from=dict(type='list', elements='str'), + container_limits=dict(type='dict', options=dict( + memory=dict(type='int'), + memswap=dict(type='int'), + cpushares=dict(type='int'), + cpusetcpus=dict(type='str'), + )), + dockerfile=dict(type='str'), + http_timeout=dict(type='int'), + network=dict(type='str'), + nocache=dict(type='bool', default=False), + path=dict(type='path', required=True), + pull=dict(type='bool', default=False), + rm=dict(type='bool', default=True), + args=dict(type='dict'), + use_config_proxy=dict(type='bool'), + target=dict(type='str'), + etc_hosts=dict(type='dict'), + platform=dict(type='str'), + )), + archive_path=dict(type='path'), + force_source=dict(type='bool', default=False), + force_absent=dict(type='bool', default=False), + force_tag=dict(type='bool', default=False), + load_path=dict(type='path'), + name=dict(type='str', required=True), + push=dict(type='bool', default=False), + repository=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + tag=dict(type='str', default='latest'), + ) + + required_if = [ + ('state', 'present', ['source']), + ('source', 'build', ['build']), + ('source', 'load', ['load_path']), + ] + + def detect_build_cache_from(client): + return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None + + def detect_build_network(client): + return client.module.params['build'] and client.module.params['build'].get('network') is not None + + def detect_build_target(client): + return client.module.params['build'] and client.module.params['build'].get('target') is not None + + def detect_use_config_proxy(client): + return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None + + def detect_etc_hosts(client): + return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts')) + + def detect_platform(client): + return client.module.params['build'] and client.module.params['build'].get('platform') is not None + + option_minimal_versions = dict() + option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from) + option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network) + option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target) + option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy) + option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts) + option_minimal_versions["build.platform"] = dict(docker_py_version='3.0.0', docker_api_version='1.32', detect_usage=detect_platform) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + 
min_docker_version='1.8.0', + min_docker_api_version='1.20', + option_minimal_versions=option_minimal_versions, + ) + + if not is_valid_tag(client.module.params['tag'], allow_empty=True): + client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag'])) + + if client.module.params['source'] == 'build': + if not client.module.params['build'] or not client.module.params['build'].get('path'): + client.fail('If "source" is set to "build", the "build.path" option must be specified.') + + try: + results = dict( + changed=False, + actions=[], + image={} + ) + + ImageManager(client, results) + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py new file mode 100644 index 00000000..5d855fa2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_info + +short_description: Inspect docker images + + +description: + - Provide one or more image names, and the module will inspect each, returning an array of inspection results. + - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists + locally, you can call the module with the image name, then check whether the result list is empty (image does not + exist) or has one element (the image exists locally). + - The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with I(source) set to C(pull) + to ensure an image is pulled. + +notes: + - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change. + +options: + name: + description: + - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]), + where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also + image IDs can be used. + - If no name is provided, a list of all images will be returned. 
+ type: list + elements: str + +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.20" + +author: + - Chris Houseknecht (@chouseknecht) + +''' + +EXAMPLES = ''' +- name: Inspect a single image + community.docker.docker_image_info: + name: pacur/centos-7 + +- name: Inspect multiple images + community.docker.docker_image_info: + name: + - pacur/centos-7 + - sinatra + register: result + +- name: Make sure that both images pacur/centos-7 and sinatra exist locally + ansible.builtin.assert: + that: + - result.images | length == 2 +''' + +RETURN = ''' +images: + description: + - Inspection results for the selected images. + - The list only contains inspection results of images existing locally. + returned: always + type: list + elements: dict + sample: [ + { + "Architecture": "amd64", + "Author": "", + "Comment": "", + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/etc/docker/registry/config.yml" + ], + "Domainname": "", + "Entrypoint": [ + "/bin/registry" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "5000/tcp": {} + }, + "Hostname": "e5c68db50333", + "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/var/lib/registry": {} + }, + "WorkingDir": "" + }, + "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610", + "ContainerConfig": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + '#(nop) CMD ["/etc/docker/registry/config.yml"]' + ], + "Domainname": "", + "Entrypoint": [ + "/bin/registry" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "5000/tcp": {} + }, + "Hostname": "e5c68db50333", + "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/var/lib/registry": {} + }, + "WorkingDir": "" + }, + "Created": "2016-03-08T21:08:15.399680378Z", + "DockerVersion": "1.9.1", + "GraphDriver": { + "Data": null, + "Name": "aufs" + }, + "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08", + "Name": "registry:2", + "Os": "linux", + "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805", + "RepoDigests": [], + "RepoTags": [ + "registry:2" + ], + "Size": 0, + "VirtualSize": 165808884 + } + ] +''' + +import traceback + +try: + from docker import utils + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + DockerBaseClass, + is_image_name_id, + RequestException, +) + + +class ImageManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ImageManager, self).__init__() + + self.client = client + self.results = results + self.name = self.client.module.params.get('name') + self.log("Gathering facts for images: %s" % (str(self.name))) + + if 
self.name:
+            self.results['images'] = self.get_facts()
+        else:
+            self.results['images'] = self.get_all_images()
+
+    def fail(self, msg):
+        self.client.fail(msg)
+
+    def get_facts(self):
+        '''
+        Lookup and inspect each image name found in the names parameter.
+
+        :returns array of image dictionaries
+        '''
+
+        results = []
+
+        names = self.name
+        if not isinstance(names, list):
+            names = [names]
+
+        for name in names:
+            if is_image_name_id(name):
+                self.log('Fetching image %s (ID)' % (name))
+                image = self.client.find_image_by_id(name)
+            else:
+                repository, tag = utils.parse_repository_tag(name)
+                if not tag:
+                    tag = 'latest'
+                self.log('Fetching image %s:%s' % (repository, tag))
+                image = self.client.find_image(name=repository, tag=tag)
+            if image:
+                results.append(image)
+        return results
+
+    def get_all_images(self):
+        results = []
+        images = self.client.images()
+        for image in images:
+            try:
+                inspection = self.client.inspect_image(image['Id'])
+            except Exception as exc:
+                self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+            results.append(inspection)
+        return results
+
+
+def main():
+    argument_spec = dict(
+        name=dict(type='list', elements='str'),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_api_version='1.20',
+    )
+
+    try:
+        results = dict(
+            changed=False,
+            images=[]
+        )
+
+        ImageManager(client, results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py
new file mode 100644
index 00000000..03d3f2a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+#     Chris Houseknecht, <house@redhat.com>
+#     James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry
+description:
+  - Provides functionality similar to the C(docker login) command.
+  - Authenticate with a docker registry and add the credentials to your local Docker config file, or to the
+    credential store associated with the registry. Adding the credentials to the config file or credential
+    store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+    and the Docker SDK for Python without needing to provide credentials.
+  - Running in check mode will perform the authentication without updating the config file.
+options:
+  registry_url:
+    description:
+      - The registry URL.
+    type: str
+    default: "https://index.docker.io/v1/"
+    aliases:
+      - registry
+      - url
+  username:
+    description:
+      - The username for the registry account.
+      - Required when I(state) is C(present).
+    type: str
+  password:
+    description:
+      - The plaintext password for the registry account.
+      - Required when I(state) is C(present).
+    type: str
+  email:
+    description:
+      - Does nothing, do not use.
+      - Will be removed in community.docker 2.0.0.
+    type: str
+  reauthorize:
+    description:
+      - Refresh existing authentication found in the configuration file.
+    type: bool
+    default: no
+    aliases:
+      - reauth
+  config_path:
+    description:
+      - Custom path to the Docker CLI configuration file.
+    type: path
+    default: ~/.docker/config.json
+    aliases:
+      - dockercfg_path
+  state:
+    description:
+      - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+      - To log out, you only need the registry server, which defaults to Docker Hub.
+      - Before Ansible 2.1 you could only log in.
+      - Docker does not support 'logout' with a custom config file.
+    type: str
+    default: 'present'
+    choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Python bindings for the docker credentials store API >= 0.2.1
+    (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+  - "Docker API >= 1.20"
+author:
+  - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+  - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+  community.docker.docker_login:
+    username: docker
+    password: rekcod
+
+- name: Log into private registry and force re-authorization
+  community.docker.docker_login:
+    registry_url: your.private.registry.io
+    username: yourself
+    password: secrets3
+    reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+  community.docker.docker_login:
+    username: docker
+    password: rekcod
+    config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+  community.docker.docker_login:
+    state: absent
+'''
+
+RETURN = '''
+login_results:
+  description: Results from the login.
+  returned: when state='present'
+  type: dict
+  sample: {
+    "serveraddress": "localhost:5000",
+    "username": "testuser"
+  }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+    from docker.errors import DockerException
+    from docker import auth
+
+    # Earlier versions of docker/docker-py put decode_auth
+    # in docker.auth.auth instead of docker.auth
+    if hasattr(auth, 'decode_auth'):
+        from docker.auth import decode_auth
+    else:
+        from docker.auth.auth import decode_auth
+
+except ImportError:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    AnsibleDockerClient,
+    HAS_DOCKER_PY,
+    DEFAULT_DOCKER_REGISTRY,
+    DockerBaseClass,
+    EMAIL_REGEX,
+    RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
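+# In rough terms the fallback chain below is: docker.credentials (bundled with
+# newer SDKs, roughly >= 4.0.0 per the requirements above) -> the separate
+# dockerpycreds package (older SDKs) -> minimal placeholder exception classes
+# plus the DockerFileStore class further down, which writes auth entries
+# directly into the config file.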
+if HAS_DOCKER_PY:
+    try:
+        from docker.credentials.errors import StoreError, CredentialsNotFound
+        from docker.credentials import Store
+    except ImportError:
+        try:
+            from dockerpycreds.errors import StoreError, CredentialsNotFound
+            from dockerpycreds.store import Store
+        except ImportError as exc:
+            HAS_DOCKER_ERROR = str(exc)
+            NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+    # docker-pycreds missing, so we need to create some placeholder classes
+    # to allow instantiation.
+
+    class StoreError(Exception):
+        pass
+
+    class CredentialsNotFound(Exception):
+        pass
+
+
+class DockerFileStore(object):
+    '''
+    A custom credential store class that implements only the functionality we need to
+    update the docker config file when no credential helper is provided.
+    '''
+
+    program = "<legacy config>"
+
+    def __init__(self, config_path):
+        self._config_path = config_path
+
+        # Make sure we have a minimal config if none is available.
+        self._config = dict(
+            auths=dict()
+        )
+
+        try:
+            # Attempt to read the existing config.
+            with open(self._config_path, "r") as f:
+                config = json.load(f)
+        except (ValueError, IOError):
+            # No config found or an invalid config found so we'll ignore it.
+            config = dict()
+
+        # Update our internal config with whatever was loaded.
+        self._config.update(config)
+
+    @property
+    def config_path(self):
+        '''
+        Return the config path configured in this DockerFileStore instance.
+        '''
+
+        return self._config_path
+
+    def get(self, server):
+        '''
+        Retrieve credentials for `server` if there are any in the config file.
+        Otherwise raise a `CredentialsNotFound` error.
+        '''
+
+        server_creds = self._config['auths'].get(server)
+        if not server_creds:
+            raise CredentialsNotFound('No matching credentials')
+
+        (username, password) = decode_auth(server_creds['auth'])
+
+        return dict(
+            Username=username,
+            Secret=password
+        )
+
+    def _write(self):
+        '''
+        Write config back out to disk.
+        '''
+        # Make sure directory exists
+        dir = os.path.dirname(self._config_path)
+        if not os.path.exists(dir):
+            os.makedirs(dir)
+        # Write config; make sure it has permissions 0o600
+        content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+        f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+        try:
+            os.write(f, content)
+        finally:
+            os.close(f)
+
+    def store(self, server, username, password):
+        '''
+        Add credentials for `server` to the current configuration.
+        '''
+
+        b64auth = base64.b64encode(
+            to_bytes(username) + b':' + to_bytes(password)
+        )
+        auth = to_text(b64auth)
+
+        # build up the auth structure
+        if 'auths' not in self._config:
+            self._config['auths'] = dict()
+
+        self._config['auths'][server] = dict(
+            auth=auth
+        )
+
+        self._write()
+
+    def erase(self, server):
+        '''
+        Remove credentials for the given server from the configuration.
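+        This is roughly the config-file equivalent of `docker logout` for the
+        given server.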
+        '''
+
+        if 'auths' in self._config and server in self._config['auths']:
+            self._config['auths'].pop(server)
+            self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+    def __init__(self, client, results):
+
+        super(LoginManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        parameters = self.client.module.params
+        self.check_mode = self.client.check_mode
+
+        self.registry_url = parameters.get('registry_url')
+        self.username = parameters.get('username')
+        self.password = parameters.get('password')
+        self.email = parameters.get('email')
+        self.reauthorize = parameters.get('reauthorize')
+        self.config_path = parameters.get('config_path')
+        self.state = parameters.get('state')
+
+    def run(self):
+        '''
+        Do the actual work of this task here. This allows instantiation for partial
+        testing.
+        '''
+
+        if self.state == 'present':
+            self.login()
+        else:
+            self.logout()
+
+    def fail(self, msg):
+        self.client.fail(msg)
+
+    def login(self):
+        '''
+        Log into the registry with the provided username/password. On success, update
+        the config file with the new authorization.
+
+        :return: None
+        '''
+
+        if self.email and not re.match(EMAIL_REGEX, self.email):
+            self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
+                      "/%s/" % (EMAIL_REGEX))
+
+        self.results['actions'].append("Logged into %s" % (self.registry_url))
+        self.log("Log into %s with username %s" % (self.registry_url, self.username))
+        try:
+            response = self.client.login(
+                self.username,
+                password=self.password,
+                email=self.email,
+                registry=self.registry_url,
+                reauth=self.reauthorize,
+                dockercfg_path=self.config_path
+            )
+        except Exception as exc:
+            self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+
+        # If the user is already logged in, the response contains the password for the user.
+        if 'password' in response:
+            # The API returns the correct (stored) password even if a wrong password was
+            # given. So if it returns a password different from the one we passed in, and
+            # the user didn't request to reauthorize, reauthorize anyway so that the new
+            # password is actually validated.
+            if not self.reauthorize and response['password'] != self.password:
+                try:
+                    response = self.client.login(
+                        self.username,
+                        password=self.password,
+                        email=self.email,
+                        registry=self.registry_url,
+                        reauth=True,
+                        dockercfg_path=self.config_path
+                    )
+                except Exception as exc:
+                    self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+            response.pop('password', None)
+        self.results['login_result'] = response
+
+        self.update_credentials()
+
+    def logout(self):
+        '''
+        Log out of the registry. On success, update the config file.
+
+        :return: None
+        '''
+
+        # Get the configuration store.
+        store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+        try:
+            current = store.get(self.registry_url)
+        except CredentialsNotFound:
+            # get raises an exception on not found.
+            self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+            self.results['changed'] = False
+            return
+
+        if not self.check_mode:
+            store.erase(self.registry_url)
+        self.results['changed'] = True
+
+    def update_credentials(self):
+        '''
+        If the authorization is not stored, attempt to store it via the appropriate
+        credential helper or in the config file.
+
+        :return: None
+        '''
+
+        # Check to see if credentials already exist.
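+        # (Both Store and the DockerFileStore fallback expose the same
+        # get()/store()/erase() interface, so the comparison below works for
+        # either backend; get() raises CredentialsNotFound when nothing is
+        # stored yet.)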
+        store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+        try:
+            current = store.get(self.registry_url)
+        except CredentialsNotFound:
+            # get raises an exception on not found.
+            current = dict(
+                Username='',
+                Secret=''
+            )
+
+        if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+            if not self.check_mode:
+                store.store(self.registry_url, self.username, self.password)
+            self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+            self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+                store.program, self.registry_url))
+            self.results['changed'] = True
+
+    def get_credential_store_instance(self, registry, dockercfg_path):
+        '''
+        Return an instance of docker.credentials.Store used by the given registry.
+
+        :return: A Store instance if a credential helper is configured for the registry,
+                 otherwise a DockerFileStore instance.
+        :rtype: Union[docker.credentials.Store, DockerFileStore]
+        '''
+
+        # Older versions of docker-py don't have this feature.
+        try:
+            credstore_env = self.client.credstore_env
+        except AttributeError:
+            credstore_env = None
+
+        config = auth.load_config(config_path=dockercfg_path)
+
+        if hasattr(auth, 'get_credential_store'):
+            store_name = auth.get_credential_store(config, registry)
+        elif 'credsStore' in config:
+            store_name = config['credsStore']
+        else:
+            store_name = None
+
+        # Make sure that there is a credential helper before trying to instantiate a
+        # Store object.
+        if store_name:
+            self.log("Found credential store %s" % store_name)
+            return Store(store_name, environment=credstore_env)
+
+        return DockerFileStore(dockercfg_path)
+
+
+def main():
+
+    argument_spec = dict(
+        registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+        username=dict(type='str'),
+        password=dict(type='str', no_log=True),
+        # Was Ansible 2.14 / community.general 3.0.0:
+        email=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.docker'),
+        reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+    )
+
+    required_if = [
+        ('state', 'present', ['username', 'password']),
+    ]
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=required_if,
+        min_docker_api_version='1.20',
+    )
+
+    try:
+        results = dict(
+            changed=False,
+            actions=[],
+            login_result={}
+        )
+
+        manager = LoginManager(client, results)
+        manager.run()
+
+        if 'actions' in results:
+            del results['actions']
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py
new file mode 100644
index 00000000..d2ecf39c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+  - Create/remove Docker networks and connect containers to them.
+  - Performs largely the same function as the C(docker network) CLI subcommand.
+options:
+  name:
+    description:
+      - Name of the network to operate on.
+    type: str
+    required: yes
+    aliases:
+      - network_name
+
+  connected:
+    description:
+      - List of container names or container IDs to connect to a network.
+      - Please note that the module only makes sure that these containers are connected to the network,
+        but does not care about connection options. If you rely on specific IP addresses etc., use the
+        M(community.docker.docker_container) module to ensure your containers are correctly connected to this network.
+    type: list
+    elements: str
+    aliases:
+      - containers
+
+  driver:
+    description:
+      - Specify the type of network. Docker provides bridge and overlay drivers, but third-party drivers can also be used.
+    type: str
+    default: bridge
+
+  driver_options:
+    description:
+      - Dictionary of network settings. Consult the Docker docs for valid options and values.
+    type: dict
+
+  force:
+    description:
+      - With state C(absent), forces disconnecting all containers from the
+        network prior to deleting the network. With state C(present), will
+        disconnect all containers, delete the network and re-create the
+        network.
+      - This option is required if you have changed the IPAM or driver options
+        and want an existing network to be updated to use the new options.
+    type: bool
+    default: no
+
+  appends:
+    description:
+      - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+      - Use I(appends) to leave existing containers connected.
+    type: bool
+    default: no
+    aliases:
+      - incremental
+
+  enable_ipv6:
+    description:
+      - Enable IPv6 networking.
+    type: bool
+
+  ipam_driver:
+    description:
+      - Specify an IPAM driver.
+    type: str
+
+  ipam_driver_options:
+    description:
+      - Dictionary of IPAM driver options.
+    type: dict
+
+  ipam_config:
+    description:
+      - List of IPAM config blocks. Consult
+        L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+        Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+    type: list
+    elements: dict
+    suboptions:
+      subnet:
+        description:
+          - IP subnet in CIDR notation.
+        type: str
+      iprange:
+        description:
+          - IP address range in CIDR notation.
+        type: str
+      gateway:
+        description:
+          - IP gateway address.
+        type: str
+      aux_addresses:
+        description:
+          - Auxiliary IP addresses used by the network driver, as a mapping from hostname to IP.
+        type: dict
+
+  state:
+    description:
+      - C(absent) deletes the network. If a network has connected containers, it
+        cannot be deleted. Use the I(force) option to disconnect all containers
+        and delete the network.
+      - C(present) creates the network, if it does not already exist with the
+        specified parameters, and connects the list of containers provided via
+        the connected parameter. Containers not on the list will be disconnected.
+        An empty list will leave no containers connected to the network. Use the
+        I(appends) option to leave existing containers connected. Use the I(force)
+        option to force re-creation of the network.
+    type: str
+    default: present
+    choices:
+      - absent
+      - present
+
+  internal:
+    description:
+      - Restrict external access to the network.
+    type: bool
+
+  labels:
+    description:
+      - Dictionary of labels.
+    type: dict
+
+  scope:
+    description:
+      - Specify the network's scope.
+    type: str
+    choices:
+      - local
+      - global
+      - swarm
+
+  attachable:
+    description:
+      - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+    type: bool
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+notes:
+  - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
+    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+    connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+    network, use the M(community.docker.docker_container) module in a loop over your containers to make sure they are connected properly.
+  - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
+    network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+    fail as well.
+
+author:
+  - "Ben Keith (@keitwb)"
+  - "Chris Houseknecht (@chouseknecht)"
+  - "Dave Bendit (@DBendit)"
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "The docker server >= 1.10.0"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+  community.docker.docker_network:
+    name: network_one
+
+- name: Remove all but selected list of containers
+  community.docker.docker_network:
+    name: network_one
+    connected:
+      - container_a
+      - container_b
+      - container_c
+
+- name: Remove a single container
+  community.docker.docker_network:
+    name: network_one
+    connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+  community.docker.docker_network:
+    name: network_one
+    connected:
+      - container_a
+    appends: yes
+
+- name: Create a network with driver options
+  community.docker.docker_network:
+    name: network_two
+    driver_options:
+      com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+  community.docker.docker_network:
+    name: network_three
+    ipam_config:
+      - subnet: 172.3.27.0/24
+        gateway: 172.3.27.2
+        iprange: 172.3.27.0/26
+        aux_addresses:
+          host1: 172.3.27.3
+          host2: 172.3.27.4
+
+- name: Create a network with labels
+  community.docker.docker_network:
+    name: network_four
+    labels:
+      key1: value1
+      key2: value2
+
+- name: Create a network with IPv6 IPAM config
+  community.docker.docker_network:
+    name: network_ipv6_one
+    enable_ipv6: yes
+    ipam_config:
+      - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+  community.docker.docker_network:
+    name: network_ipv6_two
+    enable_ipv6: yes
+    ipam_config:
+      - subnet: 172.4.27.0/24
+      - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+  community.docker.docker_network:
+    name: network_one
+    state: absent
+    force: yes
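+# For illustration, a sketch combining the I(driver) and I(attachable) options
+# documented above; the network name and option values here are only examples.
+- name: Create an attachable overlay network (sketch)
+  community.docker.docker_network:
+    name: network_overlay
+    driver: overlay
+    attachable: yes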
+'''
+
+RETURN = '''
+network:
+    description:
+    - Network inspection results for the affected network.
+    returned: success
+    type: dict
+    sample: {}
+'''
+
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    AnsibleDockerClient,
+    DockerBaseClass,
+    docker_version,
+    DifferenceTracker,
+    clean_dict_booleans_for_docker_api,
+    RequestException,
+)
+
+try:
+    from docker import utils
+    from docker.errors import DockerException
+    if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+        from docker.types import IPAMPool, IPAMConfig
+except Exception:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+
+class TaskParameters(DockerBaseClass):
+    def __init__(self, client):
+        super(TaskParameters, self).__init__()
+        self.client = client
+
+        self.name = None
+        self.connected = None
+        self.driver = None
+        self.driver_options = None
+        self.ipam_driver = None
+        self.ipam_driver_options = None
+        self.ipam_config = None
+        self.appends = None
+        self.force = None
+        self.internal = None
+        self.labels = None
+        self.debug = None
+        self.enable_ipv6 = None
+        self.scope = None
+        self.attachable = None
+
+        for key, value in client.module.params.items():
+            setattr(self, key, value)
+
+
+def container_names_in_network(network):
+    return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+# IPv6 prefix lengths go up to /128.
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[01][0-9]|12[0-8])$')
+
+
+def validate_cidr(cidr):
+    """Validate CIDR. Return IP version of a CIDR string on success.
+
+    :param cidr: Valid CIDR
+    :type cidr: str
+    :return: ``ipv4`` or ``ipv6``
+    :rtype: str
+    :raises ValueError: If ``cidr`` is not a valid CIDR
+    """
+    if CIDR_IPV4.match(cidr):
+        return 'ipv4'
+    elif CIDR_IPV6.match(cidr):
+        return 'ipv6'
+    raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
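+# For illustration (a sketch, not part of the module): validate_cidr('192.168.0.0/16')
+# returns 'ipv4', validate_cidr('fdd1:ac8c::/64') returns 'ipv6', and
+# validate_cidr('192.168.0.0/33') raises ValueError.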
+
+
+def normalize_ipam_config_key(key):
+    """Normalizes IPAM config keys returned by the Docker API to match Ansible keys.
+
+    :param key: Docker API key
+    :type key: str
+    :return: Ansible module key
+    :rtype: str
+    """
+    special_cases = {
+        'AuxiliaryAddresses': 'aux_addresses'
+    }
+    return special_cases.get(key, key.lower())
+
+
+def dicts_are_essentially_equal(a, b):
+    """Make sure that a is a subset of b, where None entries of a are ignored."""
+    for k, v in a.items():
+        if v is None:
+            continue
+        if b.get(k) != v:
+            return False
+    return True
+
+
+class DockerNetworkManager(object):
+
+    def __init__(self, client):
+        self.client = client
+        self.parameters = TaskParameters(client)
+        self.check_mode = self.client.check_mode
+        self.results = {
+            u'changed': False,
+            u'actions': []
+        }
+        self.diff = self.client.module._diff
+        self.diff_tracker = DifferenceTracker()
+        self.diff_result = dict()
+
+        self.existing_network = self.get_existing_network()
+
+        if not self.parameters.connected and self.existing_network:
+            self.parameters.connected = container_names_in_network(self.existing_network)
+
+        if self.parameters.ipam_config:
+            try:
+                for ipam_config in self.parameters.ipam_config:
+                    validate_cidr(ipam_config['subnet'])
+            except ValueError as e:
+                self.client.fail(str(e))
+
+        if self.parameters.driver_options:
+            self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+        state = self.parameters.state
+        if state == 'present':
+            self.present()
+        elif state == 'absent':
+            self.absent()
+
+        if self.diff or self.check_mode or self.parameters.debug:
+            if self.diff:
+                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+            self.results['diff'] = self.diff_result
+
+    def get_existing_network(self):
+        return self.client.get_network(name=self.parameters.name)
+
+    def has_different_config(self, net):
+        '''
+        Evaluates an existing network and returns a tuple containing a boolean
+        indicating if the configuration is different and a list of differences.
+ + :param net: the inspection output for an existing network + :return: (bool, list) + ''' + differences = DifferenceTracker() + if self.parameters.driver and self.parameters.driver != net['Driver']: + differences.add('driver', + parameter=self.parameters.driver, + active=net['Driver']) + if self.parameters.driver_options: + if not net.get('Options'): + differences.add('driver_options', + parameter=self.parameters.driver_options, + active=net.get('Options')) + else: + for key, value in self.parameters.driver_options.items(): + if not (key in net['Options']) or value != net['Options'][key]: + differences.add('driver_options.%s' % key, + parameter=value, + active=net['Options'].get(key)) + + if self.parameters.ipam_driver: + if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver: + differences.add('ipam_driver', + parameter=self.parameters.ipam_driver, + active=net.get('IPAM')) + + if self.parameters.ipam_driver_options is not None: + ipam_driver_options = net['IPAM'].get('Options') or {} + if ipam_driver_options != self.parameters.ipam_driver_options: + differences.add('ipam_driver_options', + parameter=self.parameters.ipam_driver_options, + active=ipam_driver_options) + + if self.parameters.ipam_config is not None and self.parameters.ipam_config: + if not net.get('IPAM') or not net['IPAM']['Config']: + differences.add('ipam_config', + parameter=self.parameters.ipam_config, + active=net.get('IPAM', {}).get('Config')) + else: + # Put network's IPAM config into the same format as module's IPAM config + net_ipam_configs = [] + for net_ipam_config in net['IPAM']['Config']: + config = dict() + for k, v in net_ipam_config.items(): + config[normalize_ipam_config_key(k)] = v + net_ipam_configs.append(config) + # Compare lists of dicts as sets of dicts + for idx, ipam_config in enumerate(self.parameters.ipam_config): + net_config = dict() + for net_ipam_config in net_ipam_configs: + if dicts_are_essentially_equal(ipam_config, net_ipam_config): + net_config = net_ipam_config + break + for key, value in ipam_config.items(): + if value is None: + # due to recursive argument_spec, all keys are always present + # (but have default value None if not specified) + continue + if value != net_config.get(key): + differences.add('ipam_config[%s].%s' % (idx, key), + parameter=value, + active=net_config.get(key)) + + if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False): + differences.add('enable_ipv6', + parameter=self.parameters.enable_ipv6, + active=net.get('EnableIPv6', False)) + + if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False): + differences.add('internal', + parameter=self.parameters.internal, + active=net.get('Internal')) + + if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'): + differences.add('scope', + parameter=self.parameters.scope, + active=net.get('Scope')) + + if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False): + differences.add('attachable', + parameter=self.parameters.attachable, + active=net.get('Attachable')) + if self.parameters.labels: + if not net.get('Labels'): + differences.add('labels', + parameter=self.parameters.labels, + active=net.get('Labels')) + else: + for key, value in self.parameters.labels.items(): + if not (key in net['Labels']) or value != net['Labels'][key]: + differences.add('labels.%s' % key, + parameter=value, + active=net['Labels'].get(key)) + + return not 
differences.empty, differences + + def create_network(self): + if not self.existing_network: + params = dict( + driver=self.parameters.driver, + options=self.parameters.driver_options, + ) + + ipam_pools = [] + if self.parameters.ipam_config: + for ipam_pool in self.parameters.ipam_config: + if LooseVersion(docker_version) >= LooseVersion('2.0.0'): + ipam_pools.append(IPAMPool(**ipam_pool)) + else: + ipam_pools.append(utils.create_ipam_pool(**ipam_pool)) + + if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools: + # Only add ipam parameter if a driver was specified or if IPAM parameters + # were specified. Leaving this parameter away can significantly speed up + # creation; on my machine creation with this option needs ~15 seconds, + # and without just a few seconds. + if LooseVersion(docker_version) >= LooseVersion('2.0.0'): + params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver, + pool_configs=ipam_pools, + options=self.parameters.ipam_driver_options) + else: + params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver, + pool_configs=ipam_pools) + + if self.parameters.enable_ipv6 is not None: + params['enable_ipv6'] = self.parameters.enable_ipv6 + if self.parameters.internal is not None: + params['internal'] = self.parameters.internal + if self.parameters.scope is not None: + params['scope'] = self.parameters.scope + if self.parameters.attachable is not None: + params['attachable'] = self.parameters.attachable + if self.parameters.labels: + params['labels'] = self.parameters.labels + + if not self.check_mode: + resp = self.client.create_network(self.parameters.name, **params) + self.client.report_warnings(resp, ['Warning']) + self.existing_network = self.client.get_network(network_id=resp['Id']) + self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver)) + self.results['changed'] = True + + def remove_network(self): + if self.existing_network: + self.disconnect_all_containers() + if not self.check_mode: + self.client.remove_network(self.parameters.name) + self.results['actions'].append("Removed network %s" % (self.parameters.name,)) + self.results['changed'] = True + + def is_container_connected(self, container_name): + if not self.existing_network: + return False + return container_name in container_names_in_network(self.existing_network) + + def connect_containers(self): + for name in self.parameters.connected: + if not self.is_container_connected(name): + if not self.check_mode: + self.client.connect_container_to_network(name, self.parameters.name) + self.results['actions'].append("Connected container %s" % (name,)) + self.results['changed'] = True + self.diff_tracker.add('connected.{0}'.format(name), + parameter=True, + active=False) + + def disconnect_missing(self): + if not self.existing_network: + return + containers = self.existing_network['Containers'] + if not containers: + return + for c in containers.values(): + name = c['Name'] + if name not in self.parameters.connected: + self.disconnect_container(name) + + def disconnect_all_containers(self): + containers = self.client.get_network(name=self.parameters.name)['Containers'] + if not containers: + return + for cont in containers.values(): + self.disconnect_container(cont['Name']) + + def disconnect_container(self, container_name): + if not self.check_mode: + self.client.disconnect_container_from_network(container_name, self.parameters.name) + self.results['actions'].append("Disconnected container %s" % 
(container_name,)) + self.results['changed'] = True + self.diff_tracker.add('connected.{0}'.format(container_name), + parameter=False, + active=True) + + def present(self): + different = False + differences = DifferenceTracker() + if self.existing_network: + different, differences = self.has_different_config(self.existing_network) + + self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None) + if self.parameters.force or different: + self.remove_network() + self.existing_network = None + + self.create_network() + self.connect_containers() + if not self.parameters.appends: + self.disconnect_missing() + + if self.diff or self.check_mode or self.parameters.debug: + self.diff_result['differences'] = differences.get_legacy_docker_diffs() + self.diff_tracker.merge(differences) + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + network_facts = self.get_existing_network() + self.results['network'] = network_facts + + def absent(self): + self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None) + self.remove_network() + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True, aliases=['network_name']), + connected=dict(type='list', default=[], elements='str', aliases=['containers']), + state=dict(type='str', default='present', choices=['present', 'absent']), + driver=dict(type='str', default='bridge'), + driver_options=dict(type='dict', default={}), + force=dict(type='bool', default=False), + appends=dict(type='bool', default=False, aliases=['incremental']), + ipam_driver=dict(type='str'), + ipam_driver_options=dict(type='dict'), + ipam_config=dict(type='list', elements='dict', options=dict( + subnet=dict(type='str'), + iprange=dict(type='str'), + gateway=dict(type='str'), + aux_addresses=dict(type='dict'), + )), + enable_ipv6=dict(type='bool'), + internal=dict(type='bool'), + labels=dict(type='dict', default={}), + debug=dict(type='bool', default=False), + scope=dict(type='str', choices=['local', 'global', 'swarm']), + attachable=dict(type='bool'), + ) + + option_minimal_versions = dict( + scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'), + labels=dict(docker_api_version='1.23'), + ipam_driver_options=dict(docker_py_version='2.0.0'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='1.10.0', + min_docker_api_version='1.22', + # "The docker server >= 1.10.0" + option_minimal_versions=option_minimal_versions, + ) + + try: + cm = DockerNetworkManager(client) + client.module.exit_json(**cm.results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py new file mode 100644 index 00000000..491ebf8b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_network_info + +short_description: Retrieves facts about docker network + +description: + - Retrieves facts about a docker network. + - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network) + returns for a non-absent network. + + +options: + name: + description: + - The name of the network to inspect. + - When identifying an existing network name may be a name or a long or short network ID. + type: str + required: yes +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +author: + - "Dave Bendit (@DBendit)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.21" +''' + +EXAMPLES = ''' +- name: Get infos on network + community.docker.docker_network_info: + name: mydata + register: result + +- name: Does network exist? + ansible.builtin.debug: + msg: "The network {{ 'exists' if result.exists else 'does not exist' }}" + +- name: Print information about network + ansible.builtin.debug: + var: result.network + when: result.exists +''' + +RETURN = ''' +exists: + description: + - Returns whether the network exists. + type: bool + returned: always + sample: true +network: + description: + - Facts representing the current state of the network. Matches the docker inspection output. + - Will be C(none) if network does not exist. + returned: always + type: dict + sample: '{ + "Attachable": false, + "ConfigFrom": { + "Network": "" + }, + "ConfigOnly": false, + "Containers": {}, + "Created": "2018-12-07T01:47:51.250835114-06:00", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Config": [ + { + "Gateway": "192.168.96.1", + "Subnet": "192.168.96.0/20" + } + ], + "Driver": "default", + "Options": null + }, + "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a", + "Ingress": false, + "Internal": false, + "Labels": {}, + "Name": "ansible-test-f2700bba", + "Options": {}, + "Scope": "local" + }' +''' + +import traceback + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + RequestException, +) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_api_version='1.21', + ) + + try: + network = client.get_network(client.module.params['name']) + + client.module.exit_json( + changed=False, + exists=(True if network else False), + network=network, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py 
b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py
new file mode 100644
index 00000000..d73b2d70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+  - Manages the Docker nodes via Swarm Manager.
+  - This module allows changing the node's role and availability, and modifying, adding or removing node labels.
+options:
+  hostname:
+    description:
+      - The hostname or ID of the node as registered in Swarm.
+      - If more than one node is registered using the same hostname, the ID must be used,
+        otherwise the module will fail.
+    type: str
+    required: yes
+  labels:
+    description:
+      - User-defined key/value metadata that will be assigned as a node attribute.
+      - Label operations in this module apply to the docker swarm node specified by I(hostname).
+        Use the M(community.docker.docker_swarm) module to add/modify/remove swarm cluster labels.
+      - The actual state of labels assigned to the node when the module completes its work depends on the
+        I(labels_state) and I(labels_to_remove) parameter values. See the descriptions below.
+    type: dict
+  labels_state:
+    description:
+      - Defines the operation on the labels assigned to the node with respect to the labels specified in the I(labels) option.
+      - Set to C(merge) to combine the labels provided in I(labels) with those already assigned to the node.
+        If no labels are assigned, then it will add the listed labels. For labels that are already assigned
+        to the node, it will update their values. Labels not specified in I(labels) will remain unchanged.
+        If I(labels) is empty then no changes will be made.
+      - Set to C(replace) to replace all assigned labels with the provided ones. If I(labels) is empty then
+        all labels assigned to the node will be removed.
+    type: str
+    default: 'merge'
+    choices:
+      - merge
+      - replace
+  labels_to_remove:
+    description:
+      - List of labels that will be removed from the node configuration. The list has to contain only label
+        names, not their values.
+      - If a label provided in the list is not assigned to the node, the entry is ignored.
+      - If a label is present both in I(labels_to_remove) and I(labels), then the value provided in I(labels)
+        remains assigned to the node.
+      - If I(labels_state) is C(replace) and I(labels) is not provided or empty, then all labels assigned to
+        the node are removed and I(labels_to_remove) is ignored.
+    type: list
+    elements: str
+  availability:
+    description: Node availability to assign. If not provided then node availability remains unchanged.
+    choices:
+      - active
+      - pause
+      - drain
+    type: str
+  role:
+    description: Node role to assign. If not provided then node role remains unchanged.
+ choices: + - manager + - worker + type: str +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0" + - Docker API >= 1.25 +author: + - Piotr Wojciechowski (@WojciechowskiPiotr) + - Thierry Bouvet (@tbouvet) + +''' + +EXAMPLES = ''' +- name: Set node role + community.docker.docker_node: + hostname: mynode + role: manager + +- name: Set node availability + community.docker.docker_node: + hostname: mynode + availability: drain + +- name: Replace node labels with new labels + community.docker.docker_node: + hostname: mynode + labels: + key: value + labels_state: replace + +- name: Merge node labels and new labels + community.docker.docker_node: + hostname: mynode + labels: + key: value + +- name: Remove all labels assigned to node + community.docker.docker_node: + hostname: mynode + labels_state: replace + +- name: Remove selected labels from the node + community.docker.docker_node: + hostname: mynode + labels_to_remove: + - key1 + - key2 +''' + +RETURN = ''' +node: + description: Information about node after 'update' operation + returned: success + type: dict + +''' + +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + DockerBaseClass, + RequestException, +) + +from ansible.module_utils._text import to_native + +from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient + + +class TaskParameters(DockerBaseClass): + def __init__(self, client): + super(TaskParameters, self).__init__() + + # Spec + self.name = None + self.labels = None + self.labels_state = None + self.labels_to_remove = None + + # Node + self.availability = None + self.role = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + +class SwarmNodeManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SwarmNodeManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + + self.client.fail_task_if_not_swarm_manager() + + self.parameters = TaskParameters(client) + + self.node_update() + + def node_update(self): + if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)): + self.client.fail("This node is not part of a swarm.") + return + + if self.client.check_if_swarm_node_is_down(): + self.client.fail("Can not update the node. 
The node is down.")
+
+        try:
+            node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+        except APIError as exc:
+            self.client.fail("Failed to get node information: %s" % to_native(exc))
+
+        changed = False
+        node_spec = dict(
+            Availability=self.parameters.availability,
+            Role=self.parameters.role,
+            Labels=self.parameters.labels,
+        )
+
+        if self.parameters.role is None:
+            node_spec['Role'] = node_info['Spec']['Role']
+        else:
+            if node_info['Spec']['Role'] != self.parameters.role:
+                node_spec['Role'] = self.parameters.role
+                changed = True
+
+        if self.parameters.availability is None:
+            node_spec['Availability'] = node_info['Spec']['Availability']
+        else:
+            if node_info['Spec']['Availability'] != self.parameters.availability:
+                node_spec['Availability'] = self.parameters.availability
+                changed = True
+
+        if self.parameters.labels_state == 'replace':
+            if self.parameters.labels is None:
+                node_spec['Labels'] = {}
+                if node_info['Spec']['Labels']:
+                    changed = True
+            else:
+                if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+                    node_spec['Labels'] = self.parameters.labels
+                    changed = True
+        elif self.parameters.labels_state == 'merge':
+            node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+            if self.parameters.labels is not None:
+                for key, value in self.parameters.labels.items():
+                    if node_spec['Labels'].get(key) != value:
+                        node_spec['Labels'][key] = value
+                        changed = True
+
+            if self.parameters.labels_to_remove is not None:
+                for key in self.parameters.labels_to_remove:
+                    if self.parameters.labels is not None:
+                        if not self.parameters.labels.get(key):
+                            if node_spec['Labels'].get(key):
+                                node_spec['Labels'].pop(key)
+                                changed = True
+                        else:
+                            self.client.module.warn(
+                                "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+                                "Keeping the assigned label value."
+                                % to_native(key))
+                    else:
+                        if node_spec['Labels'].get(key):
+                            node_spec['Labels'].pop(key)
+                            changed = True
+
+        if changed is True:
+            if not self.check_mode:
+                try:
+                    self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+                                            node_spec=node_spec)
+                except APIError as exc:
+                    self.client.fail("Failed to update node: %s" % to_native(exc))
+            self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+            self.results['changed'] = changed
+        else:
+            self.results['node'] = node_info
+            self.results['changed'] = changed
+
+
+def main():
+    argument_spec = dict(
+        hostname=dict(type='str', required=True),
+        labels=dict(type='dict'),
+        labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+        labels_to_remove=dict(type='list', elements='str'),
+        availability=dict(type='str', choices=['active', 'pause', 'drain']),
+        role=dict(type='str', choices=['worker', 'manager']),
+    )
+
+    client = AnsibleDockerSwarmClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='2.4.0',
+        min_docker_api_version='1.25',
+    )
+
+    try:
+        results = dict(
+            changed=False,
+        )
+
+        SwarmNodeManager(client, results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py
new file mode 100644
index 00000000..d541588c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about a docker swarm node from the Swarm Manager
+
+description:
+  - Retrieves facts about a docker node.
+  - Essentially returns the output of C(docker node inspect <name>).
+  - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+
+options:
+  name:
+    description:
+      - The name of the node to inspect, or a list of node names to inspect.
+      - If empty, returns information about all nodes in the Swarm cluster.
+      - When identifying the node, use either the hostname of the node (as registered in Swarm) or the node ID.
+      - If I(self) is C(true) then this parameter is ignored.
+    type: list
+    elements: str
+  self:
+    description:
+      - If C(true), queries the node (i.e. the docker daemon) the module communicates with.
+      - If C(true) then I(name) is ignored.
+      - If C(false), the query depends on the presence and value of I(name).
+ type: bool + default: no +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +author: + - Piotr Wojciechowski (@WojciechowskiPiotr) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0" + - "Docker API >= 1.24" +''' + +EXAMPLES = ''' +- name: Get info on all nodes + community.docker.docker_node_info: + register: result + +- name: Get info on node + community.docker.docker_node_info: + name: mynode + register: result + +- name: Get info on list of nodes + community.docker.docker_node_info: + name: + - mynode1 + - mynode2 + register: result + +- name: Get info on host if it is Swarm Manager + community.docker.docker_node_info: + self: true + register: result +''' + +RETURN = ''' +nodes: + description: + - Facts representing the current state of the nodes. Matches the C(docker node inspect) output. + - Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided. + - If I(name) contains a list of nodes, the output will provide information on all nodes registered + at the swarm, including nodes that left the swarm but haven't been removed from the cluster on swarm + managers and nodes that are unreachable. + returned: always + type: list + elements: dict +''' + +import traceback + +from ansible_collections.community.docker.plugins.module_utils.common import ( + RequestException, +) +from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +def get_node_facts(client): + + results = [] + + if client.module.params['self'] is True: + self_node_id = client.get_swarm_node_id() + node_info = client.get_node_inspect(node_id=self_node_id) + results.append(node_info) + return results + + if client.module.params['name'] is None: + node_info = client.get_all_nodes_inspect() + return node_info + + nodes = client.module.params['name'] + if not isinstance(nodes, list): + nodes = [nodes] + + for next_node_name in nodes: + next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True) + if next_node_info: + results.append(next_node_info) + return results + + +def main(): + argument_spec = dict( + name=dict(type='list', elements='str'), + self=dict(type='bool', default=False), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='2.4.0', + min_docker_api_version='1.24', + ) + + client.fail_task_if_not_swarm_manager() + + try: + nodes = get_node_facts(client) + + client.module.exit_json( + changed=False, + nodes=nodes, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py new file mode 100644 index 00000000..8fcb4094 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py @@ -0,0 +1,265 @@ 
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Allows pruning of various docker objects
+
+description:
+  - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+    and C(docker volume prune) via the Docker API.
+
+
+options:
+  containers:
+    description:
+      - Whether to prune containers.
+    type: bool
+    default: no
+  containers_filters:
+    description:
+      - A dictionary of filter values used for selecting containers to delete.
+      - "For example, C(until: 24h)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  images:
+    description:
+      - Whether to prune images.
+    type: bool
+    default: no
+  images_filters:
+    description:
+      - A dictionary of filter values used for selecting images to delete.
+      - "For example, C(dangling: true)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  networks:
+    description:
+      - Whether to prune networks.
+    type: bool
+    default: no
+  networks_filters:
+    description:
+      - A dictionary of filter values used for selecting networks to delete.
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  volumes:
+    description:
+      - Whether to prune volumes.
+    type: bool
+    default: no
+  volumes_filters:
+    description:
+      - A dictionary of filter values used for selecting volumes to delete.
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+        for more information on possible filters.
+    type: dict
+  builder_cache:
+    description:
+      - Whether to prune the builder cache.
+      - Requires version 3.3.0 of the Docker SDK for Python or newer.
+    type: bool
+    default: no
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+
+author:
+  - "Felix Fontein (@felixfontein)"
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+  - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+  community.docker.docker_prune:
+    containers: yes
+    containers_filters:
+      # only consider containers created more than 24 hours ago
+      until: 24h
+
+- name: Prune everything
+  community.docker.docker_prune:
+    containers: yes
+    images: yes
+    networks: yes
+    volumes: yes
+    builder_cache: yes
+
+- name: Prune everything (including non-dangling images)
+  community.docker.docker_prune:
+    containers: yes
+    images: yes
+    images_filters:
+      dangling: false
+    networks: yes
+    volumes: yes
+    builder_cache: yes
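+# For illustration, a sketch using the label filter supported by
+# C(docker volume prune); the label name here is only an example.
+- name: Prune volumes matching a label (sketch)
+  community.docker.docker_prune:
+    volumes: yes
+    volumes_filters:
+      label: mylabel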
+'''
+
+RETURN = '''
+# containers
+containers:
+  description:
+    - List of IDs of deleted containers.
+  returned: I(containers) is C(true)
+  type: list
+  elements: str
+  sample: '[]'
+containers_space_reclaimed:
+  description:
+    - Amount of reclaimed disk space from container pruning in bytes.
+  returned: I(containers) is C(true)
+  type: int
+  sample: '0'
+
+# images
+images:
+  description:
+    - List of IDs of deleted images.
+  returned: I(images) is C(true)
+  type: list
+  elements: str
+  sample: '[]'
+images_space_reclaimed:
+  description:
+    - Amount of reclaimed disk space from image pruning in bytes.
+  returned: I(images) is C(true)
+  type: int
+  sample: '0'
+
+# networks
+networks:
+  description:
+    - List of IDs of deleted networks.
+  returned: I(networks) is C(true)
+  type: list
+  elements: str
+  sample: '[]'
+
+# volumes
+volumes:
+  description:
+    - List of IDs of deleted volumes.
+  returned: I(volumes) is C(true)
+  type: list
+  elements: str
+  sample: '[]'
+volumes_space_reclaimed:
+  description:
+    - Amount of reclaimed disk space from volume pruning in bytes.
+  returned: I(volumes) is C(true)
+  type: int
+  sample: '0'
+
+# builder_cache
+builder_cache_space_reclaimed:
+  description:
+    - Amount of reclaimed disk space from builder cache pruning in bytes.
+  returned: I(builder_cache) is C(true)
+  type: int
+  sample: '0'
+'''
+
+import traceback
+
+try:
+    from docker.errors import DockerException
+except ImportError:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    AnsibleDockerClient,
+    RequestException,
+)
+
+try:
+    from ansible_collections.community.docker.plugins.module_utils.common import docker_version, clean_dict_booleans_for_docker_api
+except Exception as dummy:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+
+def main():
+    argument_spec = dict(
+        containers=dict(type='bool', default=False),
+        containers_filters=dict(type='dict'),
+        images=dict(type='bool', default=False),
+        images_filters=dict(type='dict'),
+        networks=dict(type='bool', default=False),
+        networks_filters=dict(type='dict'),
+        volumes=dict(type='bool', default=False),
+        volumes_filters=dict(type='dict'),
+        builder_cache=dict(type='bool', default=False),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        # supports_check_mode=True,
+        min_docker_api_version='1.25',
+        min_docker_version='2.1.0',
+    )
+
+    # Version checks
+    cache_min_version = '3.3.0'
+    if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
+        msg = "Error: Docker SDK for Python's version is %s. Minimum version required for the builder_cache option is %s. Use `pip install --upgrade docker` to upgrade."
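+        # (An explanatory note, not original module code: LooseVersion compares
+        # dotted version strings numerically per component, so for example
+        # LooseVersion('3.10.0') > LooseVersion('3.3.0') holds, unlike a plain
+        # string comparison.)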
+        client.fail(msg % (docker_version, cache_min_version))
+
+    try:
+        result = dict()
+
+        if client.module.params['containers']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+            res = client.prune_containers(filters=filters)
+            result['containers'] = res.get('ContainersDeleted') or []
+            result['containers_space_reclaimed'] = res['SpaceReclaimed']
+
+        if client.module.params['images']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+            res = client.prune_images(filters=filters)
+            result['images'] = res.get('ImagesDeleted') or []
+            result['images_space_reclaimed'] = res['SpaceReclaimed']
+
+        if client.module.params['networks']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+            res = client.prune_networks(filters=filters)
+            result['networks'] = res.get('NetworksDeleted') or []
+
+        if client.module.params['volumes']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+            res = client.prune_volumes(filters=filters)
+            result['volumes'] = res.get('VolumesDeleted') or []
+            result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+
+        if client.module.params['builder_cache']:
+            res = client.prune_builds()
+            result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+
+        client.module.exit_json(**result)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py
new file mode 100644
index 00000000..57df6e66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets
+
+
+description:
+  - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+  - Adds to the metadata of new secrets C(ansible_key), a hash of the secret data, which is then used
+    in future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated
+    unless the I(force) option is set.
+  - Updates to secrets are performed by removing the secret and creating it again.
+options:
+  data:
+    description:
+      - The value of the secret. Required when I(state) is C(present).
+    type: str
+  data_is_b64:
+    description:
+      - If set to C(true), the data is assumed to be Base64 encoded and will be
+        decoded before being used.
+      - To use binary I(data), it is better to keep it Base64 encoded and let it
+        be decoded by this option.
+    type: bool
+    default: no
+  labels:
+    description:
+      - "A map of key:value metadata, where both key and value are expected to be strings."
+      - If new metadata is provided, or existing metadata is modified, the secret will be updated by removing it and creating it again.
+    type: dict
+  force:
+    description:
+      - Use with state C(present) to always remove and recreate an existing secret.
+      - If C(true), an existing secret will be replaced, even if it has not changed.
+    type: bool
+    default: no
+  name:
+    description:
+      - The name of the secret.
+    type: str
+    required: yes
+  state:
+    description:
+      - Set to C(present), if the secret should exist, and C(absent), if it should not.
+    type: str
+    default: present
+    choices:
+      - absent
+      - present
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+  - "Docker API >= 1.25"
+
+author:
+  - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+  community.docker.docker_secret:
+    name: foo
+    # If the file is JSON or binary, Ansible might modify it (because
+    # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+    data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+    data_is_b64: true
+    state: present
+
+- name: Change the secret data
+  community.docker.docker_secret:
+    name: foo
+    data: Goodnight everyone!
+    labels:
+      bar: baz
+      one: '1'
+    state: present
+
+- name: Add a new label
+  community.docker.docker_secret:
+    name: foo
+    data: Goodnight everyone!
+    labels:
+      bar: baz
+      one: '1'
+      # Adding a new label will cause a remove/create of the secret
+      two: '2'
+    state: present
+
+- name: No change
+  community.docker.docker_secret:
+    name: foo
+    data: Goodnight everyone!
+    labels:
+      bar: baz
+      one: '1'
+      # Even though 'two' is missing, there is no change to the existing secret
+    state: present
+
+- name: Update an existing label
+  community.docker.docker_secret:
+    name: foo
+    data: Goodnight everyone!
+    labels:
+      bar: monkey  # Changing a label will cause a remove/create of the secret
+      one: '1'
+    state: present
+
+- name: Force the removal/creation of the secret
+  community.docker.docker_secret:
+    name: foo
+    data: Goodnight everyone!
+    force: yes
+    state: present
+
+- name: Remove secret foo
+  community.docker.docker_secret:
+    name: foo
+    state: absent
+'''
+
+RETURN = '''
+secret_id:
+  description:
+    - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present) + type: str + sample: 'hzehrmyjigmcp2gb6nlhmjqcv' +''' + +import base64 +import hashlib +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + DockerBaseClass, + compare_generic, + RequestException, +) +from ansible.module_utils._text import to_native, to_bytes + + +class SecretManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SecretManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + + parameters = self.client.module.params + self.name = parameters.get('name') + self.state = parameters.get('state') + self.data = parameters.get('data') + if self.data is not None: + if parameters.get('data_is_b64'): + self.data = base64.b64decode(self.data) + else: + self.data = to_bytes(self.data) + self.labels = parameters.get('labels') + self.force = parameters.get('force') + self.data_key = None + + def __call__(self): + if self.state == 'present': + self.data_key = hashlib.sha224(self.data).hexdigest() + self.present() + elif self.state == 'absent': + self.absent() + + def get_secret(self): + ''' Find an existing secret. ''' + try: + secrets = self.client.secrets(filters={'name': self.name}) + except APIError as exc: + self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc))) + + for secret in secrets: + if secret['Spec']['Name'] == self.name: + return secret + return None + + def create_secret(self): + ''' Create a new secret ''' + secret_id = None + # We can't see the data after creation, so adding a label we can use for idempotency check + labels = { + 'ansible_key': self.data_key + } + if self.labels: + labels.update(self.labels) + + try: + if not self.check_mode: + secret_id = self.client.create_secret(self.name, self.data, labels=labels) + except APIError as exc: + self.client.fail("Error creating secret: %s" % to_native(exc)) + + if isinstance(secret_id, dict): + secret_id = secret_id['ID'] + + return secret_id + + def present(self): + ''' Handles state == 'present', creating or updating the secret ''' + secret = self.get_secret() + if secret: + self.results['secret_id'] = secret['ID'] + data_changed = False + attrs = secret.get('Spec', {}) + if attrs.get('Labels', {}).get('ansible_key'): + if attrs['Labels']['ansible_key'] != self.data_key: + data_changed = True + else: + if not self.force: + self.client.module.warn("'ansible_key' label not found. 
Secret will not be changed unless the force parameter is set to 'yes'") + labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict') + if data_changed or labels_changed or self.force: + # if something changed or force, delete and re-create the secret + self.absent() + secret_id = self.create_secret() + self.results['changed'] = True + self.results['secret_id'] = secret_id + else: + self.results['changed'] = True + self.results['secret_id'] = self.create_secret() + + def absent(self): + ''' Handles state == 'absent', removing the secret ''' + secret = self.get_secret() + if secret: + try: + if not self.check_mode: + self.client.remove_secret(secret['ID']) + except APIError as exc: + self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc))) + self.results['changed'] = True + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + data=dict(type='str', no_log=True), + data_is_b64=dict(type='bool', default=False), + labels=dict(type='dict'), + force=dict(type='bool', default=False) + ) + + required_if = [ + ('state', 'present', ['data']) + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_version='2.1.0', + min_docker_api_version='1.25', + ) + + try: + results = dict( + changed=False, + secret_id='' + ) + + SecretManager(client, results)() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py new file mode 100644 index 00000000..9688b6fd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_stack +author: "Dario Zanzico (@dariko)" +short_description: docker stack module +description: + - Manage docker stacks using the 'docker stack' command + on the target node (see examples). +options: + name: + description: + - Stack name + type: str + required: yes + state: + description: + - Service state. + type: str + default: "present" + choices: + - present + - absent + compose: + description: + - List of compose definitions. Any element may be a string + referring to the path of the compose file on the target host + or the YAML contents of a compose file nested as dictionary. + type: list + elements: raw + default: [] + prune: + description: + - If true will add the C(--prune) option to the C(docker stack deploy) command. + This will have docker remove the services not present in the + current stack definition. 
+    type: bool
+    default: no
+  with_registry_auth:
+    description:
+      - If true, will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+        This will have docker send registry authentication details to Swarm agents.
+    type: bool
+    default: no
+  resolve_image:
+    description:
+      - If set, will add the C(--resolve-image) option to the C(docker stack deploy) command.
+        This will have docker query the registry to resolve image digest and
+        supported platforms. If not set, Docker uses C(always) by default.
+    type: str
+    choices: ["always", "changed", "never"]
+  absent_retries:
+    description:
+      - If C(>0) and I(state) is C(absent), the module will retry up to
+        I(absent_retries) times to delete the stack until all the
+        resources have been effectively deleted.
+        If the last try still reports the stack as not completely
+        removed, the module will fail.
+    type: int
+    default: 0
+  absent_retries_interval:
+    description:
+      - Interval in seconds between consecutive I(absent_retries).
+    type: int
+    default: 1
+
+requirements:
+  - jsondiff
+  - pyyaml
+
+notes:
+  - Return values I(out) and I(err) have been deprecated and will be removed in community.docker 2.0.0. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+  description: |
+    Dictionary containing the differences between the 'Spec' field
+    of the stack services before and after applying the new stack
+    definition.
+  sample: >
+    "stack_spec_diff":
+    {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+  returned: on change
+  type: dict
+'''
+
+EXAMPLES = '''
+  - name: Deploy stack from a compose file
+    community.docker.docker_stack:
+      state: present
+      name: mystack
+      compose:
+        - /opt/docker-compose.yml
+
+  - name: Deploy stack from base compose file and override the web service
+    community.docker.docker_stack:
+      state: present
+      name: mystack
+      compose:
+        - /opt/docker-compose.yml
+        - version: '3'
+          services:
+            web:
+              image: nginx:latest
+              environment:
+                ENVVAR: envvar
+
+  - name: Remove stack
+    community.docker.docker_stack:
+      name: mystack
+      state: absent
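+
+  # The following task is an illustrative addition, not part of the original
+  # upstream examples: it combines the prune and with_registry_auth flags
+  # documented above. Stack and file names are hypothetical.
+  - name: Deploy stack, pruning removed services and forwarding registry credentials
+    community.docker.docker_stack:
+      state: present
+      name: mystack
+      compose:
+        - /opt/docker-compose.yml
+      prune: yes
+      with_registry_auth: yes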
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+    from jsondiff import diff as json_diff
+    HAS_JSONDIFF = True
+except ImportError:
+    HAS_JSONDIFF = False
+
+try:
+    from yaml import dump as yaml_dump
+    HAS_YAML = True
+except ImportError:
+    HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command([docker_bin,
+                                       "stack",
+                                       "services",
+                                       stack_name,
+                                       "--format",
+                                       "{{.Name}}"])
+    if err == "Nothing found in stack: %s\n" % stack_name:
+        return []
+    return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command([docker_bin,
+                                       "service",
+                                       "inspect",
+                                       service_name])
+    if rc != 0:
+        return None
+    else:
+        ret = json.loads(out)[0]['Spec']
+        return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+    docker_bin = module.get_bin_path('docker', required=True)
+    command = [docker_bin, "stack", "deploy"]
+    if module.params["prune"]:
+        command += ["--prune"]
+    if module.params["with_registry_auth"]:
+        command += ["--with-registry-auth"]
+    if module.params["resolve_image"]:
+        command += ["--resolve-image",
+                    module.params["resolve_image"]]
+    for compose_file in compose_files:
+        command += ["--compose-file",
+                    compose_file]
+    command += [stack_name]
+    return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+    ret = {}
+    for service_name in docker_stack_services(module, stack_name):
+        ret[service_name] = docker_service_inspect(module, service_name)
+    return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+    docker_bin = module.get_bin_path('docker', required=True)
+    command = [docker_bin, "stack", "rm", stack_name]
+
+    rc, out, err = module.run_command(command)
+
+    while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+        sleep(interval)
+        retries = retries - 1
+        rc, out, err = module.run_command(command)
+    return rc, out, err
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec={
+            'name': dict(type='str', required=True),
+            'compose': dict(type='list', elements='raw', default=[]),
+            'prune': dict(type='bool', default=False),
+            'with_registry_auth': dict(type='bool', default=False),
+            'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+            'state': dict(type='str', default='present', choices=['present', 'absent']),
+            'absent_retries': dict(type='int', default=0),
+            'absent_retries_interval': dict(type='int', default=1)
+        },
+        supports_check_mode=False
+    )
+
+    if not HAS_JSONDIFF:
+        return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+    if not HAS_YAML:
+        return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+    state = module.params['state']
+    compose = module.params['compose']
+    name = module.params['name']
+    absent_retries = module.params['absent_retries']
+    absent_retries_interval = module.params['absent_retries_interval']
+
+    if state == 'present':
+        if not compose:
+            module.fail_json(msg=("compose parameter must be a list "
+                                  "containing at least one element"))
+
+        compose_files = []
+        for i, compose_def in enumerate(compose):
+            if isinstance(compose_def, dict):
+                compose_file_fd, compose_file = tempfile.mkstemp()
+                module.add_cleanup_file(compose_file)
+                with os.fdopen(compose_file_fd, 'w') as stack_file:
+                    compose_files.append(compose_file)
+                    stack_file.write(yaml_dump(compose_def))
+            elif isinstance(compose_def, string_types):
+                compose_files.append(compose_def)
+            else:
+                module.fail_json(msg="compose element '%s' must be a "
+                                     "string or a dictionary" % compose_def)
+
+        before_stack_services = docker_stack_inspect(module, name)
+
+        rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+        after_stack_services = docker_stack_inspect(module, name)
+
+        if rc != 0:
+            module.fail_json(msg="'docker stack deploy' command failed",
+                             rc=rc,
+                             out=out, err=err,  # Deprecated
+                             stdout=out, stderr=err)
+
+        before_after_differences = json_diff(before_stack_services,
+                                             after_stack_services)
+        for k in list(before_after_differences.keys()):
+            if isinstance(before_after_differences[k], dict):
+                before_after_differences[k].pop('UpdatedAt', None)
+                before_after_differences[k].pop('Version', None)
+                if not list(before_after_differences[k].keys()):
+                    before_after_differences.pop(k)
+
+        if not before_after_differences:
+            module.exit_json(
+                changed=False,
+                rc=rc,
+                stdout=out,
+                stderr=err)
+        else:
+            module.exit_json(
+                changed=True,
+                rc=rc,
+                stdout=out,
+                stderr=err,
+                stack_spec_diff=json_diff(before_stack_services,
+                                          after_stack_services,
+                                          dump=True))
+
+    else:
+        if docker_stack_services(module, name):
+            rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+            if rc 
!= 0: + module.fail_json(msg="'docker stack down' command failed", + rc=rc, + out=out, err=err, # Deprecated + stdout=out, stderr=err) + else: + module.exit_json(changed=True, + msg=out, rc=rc, + err=err, # Deprecated + stdout=out, stderr=err) + module.exit_json(changed=False) + + +if __name__ == "__main__": + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py new file mode 100644 index 00000000..32f59ac5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py @@ -0,0 +1,83 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: docker_stack_info +author: "Jose Angel Munoz (@imjoseangel)" +short_description: Return information on a docker stack +description: + - Retrieve information on docker stacks using the C(docker stack) command + on the target node (see examples). +''' + +RETURN = ''' +results: + description: | + List of dictionaries containing the list of stacks or tasks associated + to a stack name. + sample: > + "results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}] + returned: always + type: list +''' + +EXAMPLES = ''' + - name: Shows stack info + community.docker.docker_stack_info: + register: result + + - name: Show results + ansible.builtin.debug: + var: result.results +''' + +import json +from ansible.module_utils.basic import AnsibleModule + + +def docker_stack_list(module): + docker_bin = module.get_bin_path('docker', required=True) + rc, out, err = module.run_command( + [docker_bin, "stack", "ls", "--format={{json .}}"]) + + return rc, out.strip(), err.strip() + + +def main(): + module = AnsibleModule( + argument_spec={ + }, + supports_check_mode=False + ) + + rc, out, err = docker_stack_list(module) + + if rc != 0: + module.fail_json(msg="Error running docker stack. 
{0}".format(err), + rc=rc, stdout=out, stderr=err) + else: + if out: + ret = list( + json.loads(outitem) + for outitem in out.splitlines()) + + else: + ret = [] + + module.exit_json(changed=False, + rc=rc, + stdout=out, + stderr=err, + results=ret) + + +if __name__ == "__main__": + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py new file mode 100644 index 00000000..db6f03f4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py @@ -0,0 +1,94 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: docker_stack_task_info +author: "Jose Angel Munoz (@imjoseangel)" +short_description: Return information of the tasks on a docker stack +description: + - Retrieve information on docker stacks tasks using the C(docker stack) command + on the target node (see examples). +options: + name: + description: + - Stack name. + type: str + required: yes +''' + +RETURN = ''' +results: + description: | + List of dictionaries containing the list of tasks associated + to a stack name. + sample: > + [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}] + returned: always + type: list + elements: dict +''' + +EXAMPLES = ''' + - name: Shows stack info + community.docker.docker_stack_task_info: + name: test_stack + register: result + + - name: Show results + ansible.builtin.debug: + var: result.results +''' + +import json +from ansible.module_utils.basic import AnsibleModule + + +def docker_stack_task(module, stack_name): + docker_bin = module.get_bin_path('docker', required=True) + rc, out, err = module.run_command( + [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"]) + + return rc, out.strip(), err.strip() + + +def main(): + module = AnsibleModule( + argument_spec={ + 'name': dict(type='str', required=True) + }, + supports_check_mode=False + ) + + name = module.params['name'] + + rc, out, err = docker_stack_task(module, name) + + if rc != 0: + module.fail_json(msg="Error running docker stack. 
{0}".format(err), + rc=rc, stdout=out, stderr=err) + else: + if out: + ret = list( + json.loads(outitem) + for outitem in out.splitlines()) + + else: + ret = [] + + module.exit_json(changed=False, + rc=rc, + stdout=out, + stderr=err, + results=ret) + + +if __name__ == "__main__": + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py new file mode 100644 index 00000000..5cd2b91b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py @@ -0,0 +1,662 @@ +#!/usr/bin/python + +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: docker_swarm +short_description: Manage Swarm cluster +description: + - Create a new Swarm cluster. + - Add/Remove nodes or managers to an existing cluster. +options: + advertise_addr: + description: + - Externally reachable address advertised to other nodes. + - This can either be an address/port combination + in the form C(192.168.1.1:4567), or an interface followed by a + port number, like C(eth0:4567). + - If the port number is omitted, + the port number from the listen address is used. + - If I(advertise_addr) is not specified, it will be automatically + detected when possible. + - Only used when swarm is initialised or joined. Because of this it's not + considered for idempotency checking. + type: str + default_addr_pool: + description: + - Default address pool in CIDR format. + - Only used when swarm is initialised. Because of this it's not considered + for idempotency checking. + - Requires API version >= 1.39. + type: list + elements: str + subnet_size: + description: + - Default address pool subnet mask length. + - Only used when swarm is initialised. Because of this it's not considered + for idempotency checking. + - Requires API version >= 1.39. + type: int + listen_addr: + description: + - Listen address used for inter-manager communication. + - This can either be an address/port combination in the form + C(192.168.1.1:4567), or an interface followed by a port number, + like C(eth0:4567). + - If the port number is omitted, the default swarm listening port + is used. + - Only used when swarm is initialised or joined. Because of this it's not + considered for idempotency checking. + type: str + default: 0.0.0.0:2377 + force: + description: + - Use with state C(present) to force creating a new Swarm, even if already part of one. + - Use with state C(absent) to Leave the swarm even if this node is a manager. + type: bool + default: no + state: + description: + - Set to C(present), to create/update a new cluster. + - Set to C(join), to join an existing cluster. + - Set to C(absent), to leave an existing cluster. + - Set to C(remove), to remove an absent node from the cluster. + Note that removing requires Docker SDK for Python >= 2.4.0. + type: str + default: present + choices: + - present + - join + - absent + - remove + node_id: + description: + - Swarm id of the node to remove. + - Used with I(state=remove). + type: str + join_token: + description: + - Swarm token used to join a swarm cluster. + - Used with I(state=join). 
+    type: str
+  remote_addrs:
+    description:
+      - Remote address of one or more manager nodes of an existing Swarm to connect to.
+      - Used with I(state=join).
+    type: list
+    elements: str
+  task_history_retention_limit:
+    description:
+      - Maximum number of task history entries stored.
+      - Docker default value is C(5).
+    type: int
+  snapshot_interval:
+    description:
+      - Number of log entries between snapshots.
+      - Docker default value is C(10000).
+    type: int
+  keep_old_snapshots:
+    description:
+      - Number of snapshots to keep beyond the current snapshot.
+      - Docker default value is C(0).
+    type: int
+  log_entries_for_slow_followers:
+    description:
+      - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+    type: int
+  heartbeat_tick:
+    description:
+      - Number of ticks (in seconds) between each heartbeat.
+      - Docker default value is C(1s).
+    type: int
+  election_tick:
+    description:
+      - Number of ticks (in seconds) needed without a leader to trigger a new election.
+      - Docker default value is C(10s).
+    type: int
+  dispatcher_heartbeat_period:
+    description:
+      - The delay for an agent to send a heartbeat to the dispatcher.
+      - Docker default value is C(5s).
+    type: int
+  node_cert_expiry:
+    description:
+      - Automatic expiry for node certificates.
+      - Docker default value is C(3months).
+    type: int
+  name:
+    description:
+      - The name of the swarm.
+    type: str
+  labels:
+    description:
+      - User-defined key/value metadata.
+      - Label operations in this module apply to the docker swarm cluster.
+        Use M(community.docker.docker_node) module to add/modify/remove swarm node labels.
+      - Requires API version >= 1.32.
+    type: dict
+  signing_ca_cert:
+    description:
+      - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+      - This must not be a path to a certificate, but the contents of the certificate.
+      - Requires API version >= 1.30.
+    type: str
+  signing_ca_key:
+    description:
+      - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+      - This must not be a path to a key, but the contents of the key.
+      - Requires API version >= 1.30.
+    type: str
+  ca_force_rotate:
+    description:
+      - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+        if none have been specified.
+      - Docker default value is C(0).
+      - Requires API version >= 1.30.
+    type: int
+  autolock_managers:
+    description:
+      - If set, generate a key and use it to lock data stored on the managers.
+      - Docker default value is C(no).
+      - M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
+    type: bool
+  rotate_worker_token:
+    description: Rotate the worker join token.
+    type: bool
+    default: no
+  rotate_manager_token:
+    description: Rotate the manager join token.
+    type: bool
+    default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - Docker API >= 1.25
+author:
+  - Thierry Bouvet (@tbouvet)
+  - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+  community.docker.docker_swarm:
+    state: present
+
+- name: Update swarm configuration
+  community.docker.docker_swarm:
+    state: present
+    election_tick: 5
+
+- name: Add nodes
+  community.docker.docker_swarm:
+    state: join
+    advertise_addr: 192.168.1.2
+    join_token: SWMTKN-1--xxxxx
+    remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+  community.docker.docker_swarm:
+    state: absent
+
+- name: Remove a swarm manager
+  community.docker.docker_swarm:
+    state: absent
+    force: true
+
+- name: Remove node from swarm
+  community.docker.docker_swarm:
+    state: remove
+    node_id: mynode
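+
+# The following task is an illustrative addition, not part of the original
+# upstream examples: it initialises a swarm using the address-pool and
+# autolock options documented above. The pool values are hypothetical.
+- name: Init a new swarm with a custom address pool and autolocked managers
+  community.docker.docker_swarm:
+    state: present
+    default_addr_pool:
+      - 10.10.0.0/16
+    subnet_size: 24
+    autolock_managers: yes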
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+  returned: success
+  type: dict
+  contains:
+      JoinTokens:
+          description: Tokens to connect to the Swarm.
+          returned: success
+          type: dict
+          contains:
+              Worker:
+                  description: Token to create a new *worker* node.
+                  returned: success
+                  type: str
+                  example: SWMTKN-1--xxxxx
+              Manager:
+                  description: Token to create a new *manager* node.
+                  returned: success
+                  type: str
+                  example: SWMTKN-1--xxxxx
+      UnlockKey:
+          description: The swarm unlock-key if I(autolock_managers) is C(true).
+          returned: on success if I(autolock_managers) is C(true)
+            and swarm is initialised, or if I(autolock_managers) has changed.
+          type: str
+          example: SWMKEY-1-xxx
+
+actions:
+  description: Provides the actions done on the swarm.
+  returned: when action failed.
+ type: list + elements: str + example: "['This cluster is already a swarm cluster']" + +''' + +import json +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + DockerBaseClass, + DifferenceTracker, + RequestException, +) + +from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient + +from ansible.module_utils._text import to_native + + +class TaskParameters(DockerBaseClass): + def __init__(self): + super(TaskParameters, self).__init__() + + self.advertise_addr = None + self.listen_addr = None + self.remote_addrs = None + self.join_token = None + + # Spec + self.snapshot_interval = None + self.task_history_retention_limit = None + self.keep_old_snapshots = None + self.log_entries_for_slow_followers = None + self.heartbeat_tick = None + self.election_tick = None + self.dispatcher_heartbeat_period = None + self.node_cert_expiry = None + self.name = None + self.labels = None + self.log_driver = None + self.signing_ca_cert = None + self.signing_ca_key = None + self.ca_force_rotate = None + self.autolock_managers = None + self.rotate_worker_token = None + self.rotate_manager_token = None + self.default_addr_pool = None + self.subnet_size = None + + @staticmethod + def from_ansible_params(client): + result = TaskParameters() + for key, value in client.module.params.items(): + if key in result.__dict__: + setattr(result, key, value) + + result.update_parameters(client) + return result + + def update_from_swarm_info(self, swarm_info): + spec = swarm_info['Spec'] + + ca_config = spec.get('CAConfig') or dict() + if self.node_cert_expiry is None: + self.node_cert_expiry = ca_config.get('NodeCertExpiry') + if self.ca_force_rotate is None: + self.ca_force_rotate = ca_config.get('ForceRotate') + + dispatcher = spec.get('Dispatcher') or dict() + if self.dispatcher_heartbeat_period is None: + self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod') + + raft = spec.get('Raft') or dict() + if self.snapshot_interval is None: + self.snapshot_interval = raft.get('SnapshotInterval') + if self.keep_old_snapshots is None: + self.keep_old_snapshots = raft.get('KeepOldSnapshots') + if self.heartbeat_tick is None: + self.heartbeat_tick = raft.get('HeartbeatTick') + if self.log_entries_for_slow_followers is None: + self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers') + if self.election_tick is None: + self.election_tick = raft.get('ElectionTick') + + orchestration = spec.get('Orchestration') or dict() + if self.task_history_retention_limit is None: + self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit') + + encryption_config = spec.get('EncryptionConfig') or dict() + if self.autolock_managers is None: + self.autolock_managers = encryption_config.get('AutoLockManagers') + + if self.name is None: + self.name = spec['Name'] + + if self.labels is None: + self.labels = spec.get('Labels') or {} + + if 'LogDriver' in spec['TaskDefaults']: + self.log_driver = spec['TaskDefaults']['LogDriver'] + + def update_parameters(self, client): + assign = dict( + snapshot_interval='snapshot_interval', + task_history_retention_limit='task_history_retention_limit', + keep_old_snapshots='keep_old_snapshots', + log_entries_for_slow_followers='log_entries_for_slow_followers', + heartbeat_tick='heartbeat_tick', + 
election_tick='election_tick', + dispatcher_heartbeat_period='dispatcher_heartbeat_period', + node_cert_expiry='node_cert_expiry', + name='name', + labels='labels', + signing_ca_cert='signing_ca_cert', + signing_ca_key='signing_ca_key', + ca_force_rotate='ca_force_rotate', + autolock_managers='autolock_managers', + log_driver='log_driver', + ) + params = dict() + for dest, source in assign.items(): + if not client.option_minimal_versions[source]['supported']: + continue + value = getattr(self, source) + if value is not None: + params[dest] = value + self.spec = client.create_swarm_spec(**params) + + def compare_to_active(self, other, client, differences): + for k in self.__dict__: + if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token', + 'rotate_worker_token', 'rotate_manager_token', 'spec', + 'default_addr_pool', 'subnet_size'): + continue + if not client.option_minimal_versions[k]['supported']: + continue + value = getattr(self, k) + if value is None: + continue + other_value = getattr(other, k) + if value != other_value: + differences.add(k, parameter=value, active=other_value) + if self.rotate_worker_token: + differences.add('rotate_worker_token', parameter=True, active=False) + if self.rotate_manager_token: + differences.add('rotate_manager_token', parameter=True, active=False) + return differences + + +class SwarmManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SwarmManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + self.swarm_info = {} + + self.state = client.module.params['state'] + self.force = client.module.params['force'] + self.node_id = client.module.params['node_id'] + + self.differences = DifferenceTracker() + self.parameters = TaskParameters.from_ansible_params(client) + + self.created = False + + def __call__(self): + choice_map = { + "present": self.init_swarm, + "join": self.join, + "absent": self.leave, + "remove": self.remove, + } + + choice_map.get(self.state)() + + if self.client.module._diff or self.parameters.debug: + diff = dict() + diff['before'], diff['after'] = self.differences.get_before_after() + self.results['diff'] = diff + + def inspect_swarm(self): + try: + data = self.client.inspect_swarm() + json_str = json.dumps(data, ensure_ascii=False) + self.swarm_info = json.loads(json_str) + + self.results['changed'] = False + self.results['swarm_facts'] = self.swarm_info + + unlock_key = self.get_unlock_key() + self.swarm_info.update(unlock_key) + except APIError: + return + + def get_unlock_key(self): + default = {'UnlockKey': None} + if not self.has_swarm_lock_changed(): + return default + try: + return self.client.get_unlock_key() or default + except APIError: + return default + + def has_swarm_lock_changed(self): + return self.parameters.autolock_managers and ( + self.created or self.differences.has_difference_for('autolock_managers') + ) + + def init_swarm(self): + if not self.force and self.client.check_if_swarm_manager(): + self.__update_swarm() + return + + if not self.check_mode: + init_arguments = { + 'advertise_addr': self.parameters.advertise_addr, + 'listen_addr': self.parameters.listen_addr, + 'force_new_cluster': self.force, + 'swarm_spec': self.parameters.spec, + } + if self.parameters.default_addr_pool is not None: + init_arguments['default_addr_pool'] = self.parameters.default_addr_pool + if self.parameters.subnet_size is not None: + init_arguments['subnet_size'] = self.parameters.subnet_size + try: + 
self.client.init_swarm(**init_arguments) + except APIError as exc: + self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc)) + + if not self.client.check_if_swarm_manager(): + if not self.check_mode: + self.client.fail("Swarm not created or other error!") + + self.created = True + self.inspect_swarm() + self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID'))) + self.differences.add('state', parameter='present', active='absent') + self.results['changed'] = True + self.results['swarm_facts'] = { + 'JoinTokens': self.swarm_info.get('JoinTokens'), + 'UnlockKey': self.swarm_info.get('UnlockKey') + } + + def __update_swarm(self): + try: + self.inspect_swarm() + version = self.swarm_info['Version']['Index'] + self.parameters.update_from_swarm_info(self.swarm_info) + old_parameters = TaskParameters() + old_parameters.update_from_swarm_info(self.swarm_info) + self.parameters.compare_to_active(old_parameters, self.client, self.differences) + if self.differences.empty: + self.results['actions'].append("No modification") + self.results['changed'] = False + return + update_parameters = TaskParameters.from_ansible_params(self.client) + update_parameters.update_parameters(self.client) + if not self.check_mode: + self.client.update_swarm( + version=version, swarm_spec=update_parameters.spec, + rotate_worker_token=self.parameters.rotate_worker_token, + rotate_manager_token=self.parameters.rotate_manager_token) + except APIError as exc: + self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc)) + return + + self.inspect_swarm() + self.results['actions'].append("Swarm cluster updated") + self.results['changed'] = True + + def join(self): + if self.client.check_if_swarm_node(): + self.results['actions'].append("This node is already part of a swarm.") + return + if not self.check_mode: + try: + self.client.join_swarm( + remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, + listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr) + except APIError as exc: + self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("New node is added to swarm cluster") + self.differences.add('joined', parameter=True, active=False) + self.results['changed'] = True + + def leave(self): + if not self.client.check_if_swarm_node(): + self.results['actions'].append("This node is not part of a swarm.") + return + if not self.check_mode: + try: + self.client.leave_swarm(force=self.force) + except APIError as exc: + self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node has left the swarm cluster") + self.differences.add('joined', parameter='absent', active='present') + self.results['changed'] = True + + def remove(self): + if not self.client.check_if_swarm_manager(): + self.client.fail("This node is not a manager.") + + try: + status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5) + except APIError: + return + + if not status_down: + self.client.fail("Can not remove the node. 
The status node is ready and not down.") + + if not self.check_mode: + try: + self.client.remove_node(node_id=self.node_id, force=self.force) + except APIError as exc: + self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node is removed from swarm cluster.") + self.differences.add('joined', parameter=False, active=True) + self.results['changed'] = True + + +def _detect_remove_operation(client): + return client.module.params['state'] == 'remove' + + +def main(): + argument_spec = dict( + advertise_addr=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove']), + force=dict(type='bool', default=False), + listen_addr=dict(type='str', default='0.0.0.0:2377'), + remote_addrs=dict(type='list', elements='str'), + join_token=dict(type='str'), + snapshot_interval=dict(type='int'), + task_history_retention_limit=dict(type='int'), + keep_old_snapshots=dict(type='int'), + log_entries_for_slow_followers=dict(type='int'), + heartbeat_tick=dict(type='int'), + election_tick=dict(type='int'), + dispatcher_heartbeat_period=dict(type='int'), + node_cert_expiry=dict(type='int'), + name=dict(type='str'), + labels=dict(type='dict'), + signing_ca_cert=dict(type='str'), + signing_ca_key=dict(type='str', no_log=True), + ca_force_rotate=dict(type='int'), + autolock_managers=dict(type='bool'), + node_id=dict(type='str'), + rotate_worker_token=dict(type='bool', default=False), + rotate_manager_token=dict(type='bool', default=False), + default_addr_pool=dict(type='list', elements='str'), + subnet_size=dict(type='int'), + ) + + required_if = [ + ('state', 'join', ['remote_addrs', 'join_token']), + ('state', 'remove', ['node_id']) + ] + + option_minimal_versions = dict( + labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'), + signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + autolock_managers=dict(docker_py_version='2.6.0'), + log_driver=dict(docker_py_version='2.6.0'), + remove_operation=dict( + docker_py_version='2.4.0', + detect_usage=_detect_remove_operation, + usage_msg='remove swarm nodes' + ), + default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'), + subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_version='1.10.0', + min_docker_api_version='1.25', + option_minimal_versions=option_minimal_versions, + ) + + try: + results = dict( + changed=False, + result='', + actions=[] + ) + + SwarmManager(client, results)() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py new file mode 100644 index 00000000..095bf7fe --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about Docker Swarm cluster.
+
+description:
+  - Retrieves facts about a Docker Swarm.
+  - Returns lists of swarm object names for nodes, services and tasks.
+  - The output differs depending on the API version available on the docker host.
+  - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+    It does return boolean flags on both error and success which indicate whether
+    the docker daemon can be communicated with, whether it is in Swarm mode, and
+    whether it is a Swarm Manager node.
+
+
+author:
+    - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+  nodes:
+    description:
+      - Whether to list swarm nodes.
+    type: bool
+    default: no
+  nodes_filters:
+    description:
+      - A dictionary of filter values used for selecting nodes to list.
+      - "For example, C(name: mynode)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+        for more information on possible filters.
+    type: dict
+  services:
+    description:
+      - Whether to list swarm services.
+    type: bool
+    default: no
+  services_filters:
+    description:
+      - A dictionary of filter values used for selecting services to list.
+      - "For example, C(name: myservice)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+        for more information on possible filters.
+    type: dict
+  tasks:
+    description:
+      - Whether to list swarm tasks.
+    type: bool
+    default: no
+  tasks_filters:
+    description:
+      - A dictionary of filter values used for selecting tasks to list.
+      - "For example, C(node: mynode-1)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+        for more information on possible filters.
+    type: dict
+  unlock_key:
+    description:
+      - Whether to retrieve the swarm unlock key.
+    type: bool
+    default: no
+  verbose_output:
+    description:
+      - When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
+        contain verbose information about objects matching the full output of the API method.
+      - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the I(_info) module
+        for each type of the objects.
+    type: bool
+    default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+  community.docker.docker_swarm_info:
+  ignore_errors: yes
+  register: result
+
+- name: Inform about basic flags
+  ansible.builtin.debug:
+    msg: |
+      Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+      Docker in Swarm mode: {{ result.docker_swarm_active }}
+      This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+  community.docker.docker_swarm_info:
+    nodes: yes
+  register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+  community.docker.docker_swarm_info:
+    nodes: yes
+    verbose_output: yes
+  register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+  community.docker.docker_swarm_info:
+    nodes: yes
+    nodes_filters:
+      name: mynode
+  register: result
+
+- ansible.builtin.debug:
+    var: result.swarm_facts
+
+- name: Get the swarm unlock key
+  community.docker.docker_swarm_info:
+    unlock_key: yes
+  register: result
+
+- ansible.builtin.debug:
+    var: result.swarm_unlock_key
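+
+# The following task is an illustrative addition, not part of the original
+# upstream examples: it lists the tasks scheduled on a single node, using the
+# tasks_filters option documented above. The node name is hypothetical.
+- name: Get info on Docker Swarm and list tasks scheduled on one node
+  community.docker.docker_swarm_info:
+    tasks: yes
+    tasks_filters:
+      node: mynode-1
+  register: result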
+'''
+
+RETURN = '''
+can_talk_to_docker:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon.
+  returned: both on success and on error
+  type: bool
+docker_swarm_active:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon,
+      and the docker daemon is in Swarm mode.
+  returned: both on success and on error
+  type: bool
+docker_swarm_manager:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon,
+      the docker daemon is in Swarm mode, and the current node is
+      a manager node.
+    - Only if this one is C(true), the module will not fail.
+  returned: both on success and on error
+  type: bool
+swarm_facts:
+  description:
+    - Facts representing the basic state of the docker Swarm cluster.
+    - Contains tokens to connect to the Swarm.
+  returned: always
+  type: dict
+swarm_unlock_key:
+  description:
+    - Contains the key needed to unlock the swarm.
+  returned: When I(unlock_key) is C(true).
+  type: str
+nodes:
+  description:
+    - List of dict objects containing the basic information about each node.
+      Keys match the C(docker node ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(nodes) is C(yes)
+  type: list
+  elements: dict
+services:
+  description:
+    - List of dict objects containing the basic information about each service.
+      Keys match the C(docker service ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(services) is C(yes)
+  type: list
+  elements: dict
+tasks:
+  description:
+    - List of dict objects containing the basic information about each task.
+      Keys match the C(docker service ps) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+ returned: When I(tasks) is C(yes) + type: list + elements: dict + +''' + +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker_common + pass + +from ansible.module_utils._text import to_native + +from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient +from ansible_collections.community.docker.plugins.module_utils.common import ( + DockerBaseClass, + clean_dict_booleans_for_docker_api, + RequestException, +) + + +class DockerSwarmManager(DockerBaseClass): + + def __init__(self, client, results): + + super(DockerSwarmManager, self).__init__() + + self.client = client + self.results = results + self.verbose_output = self.client.module.params['verbose_output'] + + listed_objects = ['tasks', 'services', 'nodes'] + + self.client.fail_task_if_not_swarm_manager() + + self.results['swarm_facts'] = self.get_docker_swarm_facts() + + for docker_object in listed_objects: + if self.client.module.params[docker_object]: + returned_name = docker_object + filter_name = docker_object + "_filters" + filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name)) + self.results[returned_name] = self.get_docker_items_list(docker_object, filters) + if self.client.module.params['unlock_key']: + self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key() + + def get_docker_swarm_facts(self): + try: + return self.client.inspect_swarm() + except APIError as exc: + self.client.fail("Error inspecting docker swarm: %s" % to_native(exc)) + + def get_docker_items_list(self, docker_object=None, filters=None): + items = None + items_list = [] + + try: + if docker_object == 'nodes': + items = self.client.nodes(filters=filters) + elif docker_object == 'tasks': + items = self.client.tasks(filters=filters) + elif docker_object == 'services': + items = self.client.services(filters=filters) + except APIError as exc: + self.client.fail("Error inspecting docker swarm for object '%s': %s" % + (docker_object, to_native(exc))) + + if self.verbose_output: + return items + + for item in items: + item_record = dict() + + if docker_object == 'nodes': + item_record = self.get_essential_facts_nodes(item) + elif docker_object == 'tasks': + item_record = self.get_essential_facts_tasks(item) + elif docker_object == 'services': + item_record = self.get_essential_facts_services(item) + if item_record['Mode'] == 'Global': + item_record['Replicas'] = len(items) + items_list.append(item_record) + + return items_list + + @staticmethod + def get_essential_facts_nodes(item): + object_essentials = dict() + + object_essentials['ID'] = item.get('ID') + object_essentials['Hostname'] = item['Description']['Hostname'] + object_essentials['Status'] = item['Status']['State'] + object_essentials['Availability'] = item['Spec']['Availability'] + if 'ManagerStatus' in item: + object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability'] + if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True: + object_essentials['ManagerStatus'] = "Leader" + else: + object_essentials['ManagerStatus'] = None + object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion'] + + return object_essentials + + def get_essential_facts_tasks(self, item): + object_essentials = dict() + + object_essentials['ID'] = item['ID'] + # Returning container ID to not trigger another connection to host + # Container ID is sufficient to get 
extended info in other tasks + object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID'] + object_essentials['Image'] = item['Spec']['ContainerSpec']['Image'] + object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID']) + object_essentials['DesiredState'] = item['DesiredState'] + object_essentials['CurrentState'] = item['Status']['State'] + if 'Err' in item['Status']: + object_essentials['Error'] = item['Status']['Err'] + else: + object_essentials['Error'] = None + + return object_essentials + + @staticmethod + def get_essential_facts_services(item): + object_essentials = dict() + + object_essentials['ID'] = item['ID'] + object_essentials['Name'] = item['Spec']['Name'] + if 'Replicated' in item['Spec']['Mode']: + object_essentials['Mode'] = "Replicated" + object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas'] + elif 'Global' in item['Spec']['Mode']: + object_essentials['Mode'] = "Global" + # Number of replicas have to be updated in calling method or may be left as None + object_essentials['Replicas'] = None + object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image'] + if 'Ports' in item['Spec']['EndpointSpec']: + object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports'] + else: + object_essentials['Ports'] = [] + + return object_essentials + + def get_docker_swarm_unlock_key(self): + unlock_key = self.client.get_unlock_key() or {} + return unlock_key.get('UnlockKey') or None + + +def main(): + argument_spec = dict( + nodes=dict(type='bool', default=False), + nodes_filters=dict(type='dict'), + tasks=dict(type='bool', default=False), + tasks_filters=dict(type='dict'), + services=dict(type='bool', default=False), + services_filters=dict(type='dict'), + unlock_key=dict(type='bool', default=False), + verbose_output=dict(type='bool', default=False), + ) + option_minimal_versions = dict( + unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='1.10.0', + min_docker_api_version='1.24', + option_minimal_versions=option_minimal_versions, + fail_results=dict( + can_talk_to_docker=False, + docker_swarm_active=False, + docker_swarm_manager=False, + ), + ) + client.fail_results['can_talk_to_docker'] = True + client.fail_results['docker_swarm_active'] = client.check_if_swarm_node() + client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager() + + try: + results = dict( + changed=False, + ) + + DockerSwarmManager(client, results) + results.update(client.fail_results) + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py new file mode 100644 index 00000000..f8a4bd00 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py @@ -0,0 +1,2800 @@ +#!/usr/bin/python +# +# (c) 2017, Dario Zanzico (git@dariozanzico.com) +# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: docker_swarm_service +author: + - "Dario Zanzico (@dariko)" + - "Jason Witkowski (@jwitko)" + - "Hannes Ljungberg (@hannseman)" +short_description: docker swarm service +description: + - Manages docker services via a swarm manager node. +options: + args: + description: + - List arguments to be passed to the container. + - Corresponds to the C(ARG) parameter of C(docker service create). + type: list + elements: str + command: + description: + - Command to execute when the container starts. + - A command may be either a string or a list or a list of strings. + - Corresponds to the C(COMMAND) parameter of C(docker service create). + type: raw + configs: + description: + - List of dictionaries describing the service configs. + - Corresponds to the C(--config) option of C(docker service create). + - Requires API version >= 1.30. + type: list + elements: dict + suboptions: + config_id: + description: + - Config's ID. + type: str + config_name: + description: + - Config's name as defined at its creation. + type: str + required: yes + filename: + description: + - Name of the file containing the config. Defaults to the I(config_name) if not specified. + type: str + uid: + description: + - UID of the config file's owner. + type: str + gid: + description: + - GID of the config file's group. + type: str + mode: + description: + - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)). + type: int + container_labels: + description: + - Dictionary of key value pairs. + - Corresponds to the C(--container-label) option of C(docker service create). + type: dict + dns: + description: + - List of custom DNS servers. + - Corresponds to the C(--dns) option of C(docker service create). + - Requires API version >= 1.25. + type: list + elements: str + dns_search: + description: + - List of custom DNS search domains. + - Corresponds to the C(--dns-search) option of C(docker service create). + - Requires API version >= 1.25. + type: list + elements: str + dns_options: + description: + - List of custom DNS options. + - Corresponds to the C(--dns-option) option of C(docker service create). + - Requires API version >= 1.25. + type: list + elements: str + endpoint_mode: + description: + - Service endpoint mode. + - Corresponds to the C(--endpoint-mode) option of C(docker service create). + - Requires API version >= 1.25. + type: str + choices: + - vip + - dnsrr + env: + description: + - List or dictionary of the service environment variables. + - If passed a list each items need to be in the format of C(KEY=VALUE). + - If passed a dictionary values which might be parsed as numbers, + booleans or other types by the YAML parser must be quoted (e.g. C("true")) + in order to avoid data loss. + - Corresponds to the C(--env) option of C(docker service create). + type: raw + env_files: + description: + - List of paths to files, present on the target, containing environment variables C(FOO=BAR). + - The order of the list is significant in determining the value assigned to a + variable that shows up more than once. + - If variable also present in I(env), then I(env) value will override. + type: list + elements: path + force_update: + description: + - Force update even if no changes require it. + - Corresponds to the C(--force) option of C(docker service update). + - Requires API version >= 1.25. 
+ type: bool + default: no + groups: + description: + - List of additional group names and/or IDs that the container process will run as. + - Corresponds to the C(--group) option of C(docker service update). + - Requires API version >= 1.25. + type: list + elements: str + healthcheck: + description: + - Configure a check that is run to determine whether or not containers for this service are "healthy". + See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) + for details on how healthchecks work. + - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format + that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Requires API version >= 1.25. + type: dict + suboptions: + test: + description: + - Command to run to check health. + - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL). + type: raw + interval: + description: + - Time between running the check. + type: str + timeout: + description: + - Maximum time to allow one check to run. + type: str + retries: + description: + - Consecutive failures needed to report unhealthy. It accept integer value. + type: int + start_period: + description: + - Start period for the container to initialize before starting health-retries countdown. + type: str + hostname: + description: + - Container hostname. + - Corresponds to the C(--hostname) option of C(docker service create). + - Requires API version >= 1.25. + type: str + hosts: + description: + - Dict of host-to-IP mappings, where each host name is a key in the dictionary. + Each host name will be added to the container's /etc/hosts file. + - Corresponds to the C(--host) option of C(docker service create). + - Requires API version >= 1.25. + type: dict + image: + description: + - Service image path and tag. + - Corresponds to the C(IMAGE) parameter of C(docker service create). + type: str + init: + description: + - Use an init inside each service container to forward signals and reap processes. + - Corresponds to the C(--init) option of C(docker service create). + - Requires API version >= 1.37. + type: bool + labels: + description: + - Dictionary of key value pairs. + - Corresponds to the C(--label) option of C(docker service create). + type: dict + limits: + description: + - Configures service resource limits. + suboptions: + cpus: + description: + - Service CPU limit. C(0) equals no limit. + - Corresponds to the C(--limit-cpu) option of C(docker service create). + type: float + memory: + description: + - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - C(0) equals no limit. + - Omitting the unit defaults to bytes. + - Corresponds to the C(--limit-memory) option of C(docker service create). + type: str + type: dict + logging: + description: + - "Logging configuration for the service." + suboptions: + driver: + description: + - Configure the logging driver for a service. + - Corresponds to the C(--log-driver) option of C(docker service create). + type: str + options: + description: + - Options for service logging driver. + - Corresponds to the C(--log-opt) option of C(docker service create). + type: dict + type: dict + mode: + description: + - Service replication mode. 
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: yes
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: yes
+ networks:
+ description:
+ - List of the service network names or dictionaries.
+ - When passed as dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made, the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key-value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: yes
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: yes
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the container's root filesystem as read-only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ resolve_image:
+ description:
+ - If the current image digest should be resolved from registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: no
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of C(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.24"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{ + "args": [ + "3600" + ], + "command": [ + "sleep" + ], + "configs": null, + "constraints": [ + "node.role == manager", + "engine.labels.operatingsystem == ubuntu 14.04" + ], + "container_labels": null, + "dns": null, + "dns_options": null, + "dns_search": null, + "endpoint_mode": null, + "env": [ + "ENVVAR1=envvar1", + "ENVVAR2=envvar2" + ], + "force_update": null, + "groups": null, + "healthcheck": { + "interval": 90000000000, + "retries": 3, + "start_period": 30000000000, + "test": [ + "CMD", + "curl", + "--fail", + "http://nginx.host.com" + ], + "timeout": 10000000000 + }, + "healthcheck_disabled": false, + "hostname": null, + "hosts": null, + "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8", + "labels": { + "com.example.department": "Finance", + "com.example.description": "Accounting webapp" + }, + "limit_cpu": 0.5, + "limit_memory": 52428800, + "log_driver": "fluentd", + "log_driver_options": { + "fluentd-address": "127.0.0.1:24224", + "fluentd-async-connect": "true", + "tag": "myservice" + }, + "mode": "replicated", + "mounts": [ + { + "readonly": false, + "source": "/tmp/", + "target": "/remote_tmp/", + "type": "bind", + "labels": null, + "propagation": null, + "no_copy": null, + "driver_config": null, + "tmpfs_size": null, + "tmpfs_mode": null + } + ], + "networks": null, + "placement_preferences": [ + { + "spread": "node.labels.mylabel" + } + ], + "publish": null, + "read_only": null, + "replicas": 1, + "reserve_cpu": 0.25, + "reserve_memory": 20971520, + "restart_policy": "on-failure", + "restart_policy_attempts": 3, + "restart_policy_delay": 5000000000, + "restart_policy_window": 120000000000, + "secrets": null, + "stop_grace_period": null, + "stop_signal": null, + "tty": null, + "update_delay": 10000000000, + "update_failure_action": null, + "update_max_failure_ratio": null, + "update_monitor": null, + "update_order": "stop-first", + "update_parallelism": 2, + "user": null, + "working_dir": null + }' +changes: + returned: always + description: + - List of changed service attributes if a service has been altered, [] otherwise. 
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+
+- name: Set configs
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
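+ # interval, timeout and start_period accept duration strings like "1m30s" or "10s".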
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"] + interval: 1m30s + timeout: 10s + retries: 3 + start_period: 30s + +- name: Configure service resources + community.docker.docker_swarm_service: + name: myservice + image: alpine:edge + reservations: + cpus: 0.25 + memory: 20M + limits: + cpus: 0.50 + memory: 50M + +- name: Remove service + community.docker.docker_swarm_service: + name: myservice + state: absent +''' + +import shlex +import time +import operator +import traceback + +from distutils.version import LooseVersion + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + DifferenceTracker, + DockerBaseClass, + convert_duration_to_nanosecond, + parse_healthcheck, + clean_dict_booleans_for_docker_api, + RequestException, +) + +from ansible.module_utils.basic import human_to_bytes +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_text + +try: + from docker import types + from docker.utils import ( + parse_repository_tag, + parse_env_file, + format_environment, + ) + from docker.errors import ( + APIError, + DockerException, + NotFound, + ) +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +def get_docker_environment(env, env_files): + """ + Will return a list of "KEY=VALUE" items. Supplied env variable can + be either a list or a dictionary. + + If environment files are combined with explicit environment variables, + the explicit environment variables take precedence. + """ + env_dict = {} + if env_files: + for env_file in env_files: + parsed_env_file = parse_env_file(env_file) + for name, value in parsed_env_file.items(): + env_dict[name] = str(value) + if env is not None and isinstance(env, string_types): + env = env.split(',') + if env is not None and isinstance(env, dict): + for name, value in env.items(): + if not isinstance(value, string_types): + raise ValueError( + 'Non-string value found for env option. ' + 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name + ) + env_dict[name] = str(value) + elif env is not None and isinstance(env, list): + for item in env: + try: + name, value = item.split('=', 1) + except ValueError: + raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.') + env_dict[name] = value + elif env is not None: + raise ValueError( + 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env)) + ) + env_list = format_environment(env_dict) + if not env_list: + if env is not None or env_files is not None: + return [] + else: + return None + return sorted(env_list) + + +def get_docker_networks(networks, network_ids): + """ + Validate a list of network names or a list of network dictionaries. + Network names will be resolved to ids by using the network_ids mapping. + """ + if networks is None: + return None + parsed_networks = [] + for network in networks: + if isinstance(network, string_types): + parsed_network = {'name': network} + elif isinstance(network, dict): + if 'name' not in network: + raise TypeError( + '"name" is required when networks are passed as dictionaries.' 
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries are allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
+
+
+def get_nanoseconds_from_raw_option(name, value):
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
+
+
+def get_value(key, values, default=None):
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict, while
+ ignoring keys in new_dict whose values are None.
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+ Check if two lists have differences. Lists are sorted by default.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key was not specified when sorting a list of dictionaries'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+
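# Cast the old item to the new item's string type and compare the casted values. +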
+ try: + new_item_type = type(new_item) + old_item_casted = new_item_type(old_item) + if new_item != old_item_casted: + return True + else: + continue + except UnicodeEncodeError: + # Fallback to assuming the strings are different + return True + else: + return True + if isinstance(new_item, dict): + if has_dict_changed(new_item, old_item): + return True + elif new_item != old_item: + return True + + return False + + +def have_networks_changed(new_networks, old_networks): + """Special case list checking for networks to sort aliases""" + + if new_networks is None: + return False + old_networks = old_networks or [] + if len(new_networks) != len(old_networks): + return True + + zip_data = zip( + sorted(new_networks, key=lambda k: k['id']), + sorted(old_networks, key=lambda k: k['id']) + ) + + for new_item, old_item in zip_data: + new_item = dict(new_item) + old_item = dict(old_item) + # Sort the aliases + if 'aliases' in new_item: + new_item['aliases'] = sorted(new_item['aliases'] or []) + if 'aliases' in old_item: + old_item['aliases'] = sorted(old_item['aliases'] or []) + + if has_dict_changed(new_item, old_item): + return True + + return False + + +class DockerService(DockerBaseClass): + def __init__(self, docker_api_version, docker_py_version): + super(DockerService, self).__init__() + self.image = "" + self.command = None + self.args = None + self.endpoint_mode = None + self.dns = None + self.healthcheck = None + self.healthcheck_disabled = None + self.hostname = None + self.hosts = None + self.tty = None + self.dns_search = None + self.dns_options = None + self.env = None + self.force_update = None + self.groups = None + self.log_driver = None + self.log_driver_options = None + self.labels = None + self.container_labels = None + self.limit_cpu = None + self.limit_memory = None + self.reserve_cpu = None + self.reserve_memory = None + self.mode = "replicated" + self.user = None + self.mounts = None + self.configs = None + self.secrets = None + self.constraints = None + self.networks = None + self.stop_grace_period = None + self.stop_signal = None + self.publish = None + self.placement_preferences = None + self.replicas = -1 + self.service_id = False + self.service_version = False + self.read_only = None + self.restart_policy = None + self.restart_policy_attempts = None + self.restart_policy_delay = None + self.restart_policy_window = None + self.rollback_config = None + self.update_delay = None + self.update_parallelism = None + self.update_failure_action = None + self.update_monitor = None + self.update_max_failure_ratio = None + self.update_order = None + self.working_dir = None + self.init = None + + self.docker_api_version = docker_api_version + self.docker_py_version = docker_py_version + + def get_facts(self): + return { + 'image': self.image, + 'mounts': self.mounts, + 'configs': self.configs, + 'networks': self.networks, + 'command': self.command, + 'args': self.args, + 'tty': self.tty, + 'dns': self.dns, + 'dns_search': self.dns_search, + 'dns_options': self.dns_options, + 'healthcheck': self.healthcheck, + 'healthcheck_disabled': self.healthcheck_disabled, + 'hostname': self.hostname, + 'hosts': self.hosts, + 'env': self.env, + 'force_update': self.force_update, + 'groups': self.groups, + 'log_driver': self.log_driver, + 'log_driver_options': self.log_driver_options, + 'publish': self.publish, + 'constraints': self.constraints, + 'placement_preferences': self.placement_preferences, + 'labels': self.labels, + 'container_labels': self.container_labels, + 'mode': self.mode, + 
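# replicas of -1 is only the module's "keep existing count" input sentinel; it is resolved in from_ansible_params. +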
'replicas': self.replicas, + 'endpoint_mode': self.endpoint_mode, + 'restart_policy': self.restart_policy, + 'secrets': self.secrets, + 'stop_grace_period': self.stop_grace_period, + 'stop_signal': self.stop_signal, + 'limit_cpu': self.limit_cpu, + 'limit_memory': self.limit_memory, + 'read_only': self.read_only, + 'reserve_cpu': self.reserve_cpu, + 'reserve_memory': self.reserve_memory, + 'restart_policy_delay': self.restart_policy_delay, + 'restart_policy_attempts': self.restart_policy_attempts, + 'restart_policy_window': self.restart_policy_window, + 'rollback_config': self.rollback_config, + 'update_delay': self.update_delay, + 'update_parallelism': self.update_parallelism, + 'update_failure_action': self.update_failure_action, + 'update_monitor': self.update_monitor, + 'update_max_failure_ratio': self.update_max_failure_ratio, + 'update_order': self.update_order, + 'user': self.user, + 'working_dir': self.working_dir, + 'init': self.init, + } + + @property + def can_update_networks(self): + # Before Docker API 1.29 adding/removing networks was not supported + return ( + self.docker_api_version >= LooseVersion('1.29') and + self.docker_py_version >= LooseVersion('2.7') + ) + + @property + def can_use_task_template_networks(self): + # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec + return ( + self.docker_api_version >= LooseVersion('1.25') and + self.docker_py_version >= LooseVersion('2.7') + ) + + @staticmethod + def get_restart_config_from_ansible_params(params): + restart_config = params['restart_config'] or {} + condition = get_value( + 'condition', + restart_config, + ) + delay = get_value( + 'delay', + restart_config, + ) + delay = get_nanoseconds_from_raw_option( + 'restart_policy_delay', + delay + ) + max_attempts = get_value( + 'max_attempts', + restart_config, + ) + window = get_value( + 'window', + restart_config, + ) + window = get_nanoseconds_from_raw_option( + 'restart_policy_window', + window + ) + return { + 'restart_policy': condition, + 'restart_policy_delay': delay, + 'restart_policy_attempts': max_attempts, + 'restart_policy_window': window + } + + @staticmethod + def get_update_config_from_ansible_params(params): + update_config = params['update_config'] or {} + parallelism = get_value( + 'parallelism', + update_config, + ) + delay = get_value( + 'delay', + update_config, + ) + delay = get_nanoseconds_from_raw_option( + 'update_delay', + delay + ) + failure_action = get_value( + 'failure_action', + update_config, + ) + monitor = get_value( + 'monitor', + update_config, + ) + monitor = get_nanoseconds_from_raw_option( + 'update_monitor', + monitor + ) + max_failure_ratio = get_value( + 'max_failure_ratio', + update_config, + ) + order = get_value( + 'order', + update_config, + ) + return { + 'update_parallelism': parallelism, + 'update_delay': delay, + 'update_failure_action': failure_action, + 'update_monitor': monitor, + 'update_max_failure_ratio': max_failure_ratio, + 'update_order': order + } + + @staticmethod + def get_rollback_config_from_ansible_params(params): + if params['rollback_config'] is None: + return None + rollback_config = params['rollback_config'] or {} + delay = get_nanoseconds_from_raw_option( + 'rollback_config.delay', + rollback_config.get('delay') + ) + monitor = get_nanoseconds_from_raw_option( + 'rollback_config.monitor', + rollback_config.get('monitor') + ) + return { + 'parallelism': rollback_config.get('parallelism'), + 'delay': delay, + 'failure_action': rollback_config.get('failure_action'), + 'monitor': 
monitor, + 'max_failure_ratio': rollback_config.get('max_failure_ratio'), + 'order': rollback_config.get('order'), + + } + + @staticmethod + def get_logging_from_ansible_params(params): + logging_config = params['logging'] or {} + driver = get_value( + 'driver', + logging_config, + ) + options = get_value( + 'options', + logging_config, + ) + return { + 'log_driver': driver, + 'log_driver_options': options, + } + + @staticmethod + def get_limits_from_ansible_params(params): + limits = params['limits'] or {} + cpus = get_value( + 'cpus', + limits, + ) + memory = get_value( + 'memory', + limits, + ) + if memory is not None: + try: + memory = human_to_bytes(memory) + except ValueError as exc: + raise Exception('Failed to convert limit_memory to bytes: %s' % exc) + return { + 'limit_cpu': cpus, + 'limit_memory': memory, + } + + @staticmethod + def get_reservations_from_ansible_params(params): + reservations = params['reservations'] or {} + cpus = get_value( + 'cpus', + reservations, + ) + memory = get_value( + 'memory', + reservations, + ) + + if memory is not None: + try: + memory = human_to_bytes(memory) + except ValueError as exc: + raise Exception('Failed to convert reserve_memory to bytes: %s' % exc) + return { + 'reserve_cpu': cpus, + 'reserve_memory': memory, + } + + @staticmethod + def get_placement_from_ansible_params(params): + placement = params['placement'] or {} + constraints = get_value( + 'constraints', + placement + ) + + preferences = placement.get('preferences') + return { + 'constraints': constraints, + 'placement_preferences': preferences, + } + + @classmethod + def from_ansible_params( + cls, + ap, + old_service, + image_digest, + secret_ids, + config_ids, + network_ids, + docker_api_version, + docker_py_version, + ): + s = DockerService(docker_api_version, docker_py_version) + s.image = image_digest + s.args = ap['args'] + s.endpoint_mode = ap['endpoint_mode'] + s.dns = ap['dns'] + s.dns_search = ap['dns_search'] + s.dns_options = ap['dns_options'] + s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck']) + s.hostname = ap['hostname'] + s.hosts = ap['hosts'] + s.tty = ap['tty'] + s.labels = ap['labels'] + s.container_labels = ap['container_labels'] + s.mode = ap['mode'] + s.stop_signal = ap['stop_signal'] + s.user = ap['user'] + s.working_dir = ap['working_dir'] + s.read_only = ap['read_only'] + s.init = ap['init'] + + s.networks = get_docker_networks(ap['networks'], network_ids) + + s.command = ap['command'] + if isinstance(s.command, string_types): + s.command = shlex.split(s.command) + elif isinstance(s.command, list): + invalid_items = [ + (index, item) + for index, item in enumerate(s.command) + if not isinstance(item, string_types) + ] + if invalid_items: + errors = ', '.join( + [ + '%s (%s) at index %s' % (item, type(item), index) + for index, item in invalid_items + ] + ) + raise Exception( + 'All items in a command list need to be strings. ' + 'Check quoting. Invalid items: %s.' + % errors + ) + s.command = ap['command'] + elif s.command is not None: + raise ValueError( + 'Invalid type for command %s (%s). ' + 'Only string or list allowed. Check quoting.' 
+ % (s.command, type(s.command)) + ) + + s.env = get_docker_environment(ap['env'], ap['env_files']) + s.rollback_config = cls.get_rollback_config_from_ansible_params(ap) + + update_config = cls.get_update_config_from_ansible_params(ap) + for key, value in update_config.items(): + setattr(s, key, value) + + restart_config = cls.get_restart_config_from_ansible_params(ap) + for key, value in restart_config.items(): + setattr(s, key, value) + + logging_config = cls.get_logging_from_ansible_params(ap) + for key, value in logging_config.items(): + setattr(s, key, value) + + limits = cls.get_limits_from_ansible_params(ap) + for key, value in limits.items(): + setattr(s, key, value) + + reservations = cls.get_reservations_from_ansible_params(ap) + for key, value in reservations.items(): + setattr(s, key, value) + + placement = cls.get_placement_from_ansible_params(ap) + for key, value in placement.items(): + setattr(s, key, value) + + if ap['stop_grace_period'] is not None: + s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period']) + + if ap['force_update']: + s.force_update = int(str(time.time()).replace('.', '')) + + if ap['groups'] is not None: + # In case integers are passed as groups, we need to convert them to + # strings as docker internally treats them as strings. + s.groups = [str(g) for g in ap['groups']] + + if ap['replicas'] == -1: + if old_service: + s.replicas = old_service.replicas + else: + s.replicas = 1 + else: + s.replicas = ap['replicas'] + + if ap['publish'] is not None: + s.publish = [] + for param_p in ap['publish']: + service_p = {} + service_p['protocol'] = param_p['protocol'] + service_p['mode'] = param_p['mode'] + service_p['published_port'] = param_p['published_port'] + service_p['target_port'] = param_p['target_port'] + s.publish.append(service_p) + + if ap['mounts'] is not None: + s.mounts = [] + for param_m in ap['mounts']: + service_m = {} + service_m['readonly'] = param_m['readonly'] + service_m['type'] = param_m['type'] + if param_m['source'] is None and param_m['type'] != 'tmpfs': + raise ValueError('Source must be specified for mounts which are not of type tmpfs') + service_m['source'] = param_m['source'] or '' + service_m['target'] = param_m['target'] + service_m['labels'] = param_m['labels'] + service_m['no_copy'] = param_m['no_copy'] + service_m['propagation'] = param_m['propagation'] + service_m['driver_config'] = param_m['driver_config'] + service_m['tmpfs_mode'] = param_m['tmpfs_mode'] + tmpfs_size = param_m['tmpfs_size'] + if tmpfs_size is not None: + try: + tmpfs_size = human_to_bytes(tmpfs_size) + except ValueError as exc: + raise ValueError( + 'Failed to convert tmpfs_size to bytes: %s' % exc + ) + + service_m['tmpfs_size'] = tmpfs_size + s.mounts.append(service_m) + + if ap['configs'] is not None: + s.configs = [] + for param_m in ap['configs']: + service_c = {} + config_name = param_m['config_name'] + service_c['config_id'] = param_m['config_id'] or config_ids[config_name] + service_c['config_name'] = config_name + service_c['filename'] = param_m['filename'] or config_name + service_c['uid'] = param_m['uid'] + service_c['gid'] = param_m['gid'] + service_c['mode'] = param_m['mode'] + s.configs.append(service_c) + + if ap['secrets'] is not None: + s.secrets = [] + for param_m in ap['secrets']: + service_s = {} + secret_name = param_m['secret_name'] + service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name] + service_s['secret_name'] = secret_name + service_s['filename'] = param_m['filename'] or secret_name + 
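# uid, gid and mode control the ownership and permissions of the secret file inside the container. +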
service_s['uid'] = param_m['uid'] + service_s['gid'] = param_m['gid'] + service_s['mode'] = param_m['mode'] + s.secrets.append(service_s) + + return s + + def compare(self, os): + differences = DifferenceTracker() + needs_rebuild = False + force_update = False + if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode: + differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode) + if has_list_changed(self.env, os.env): + differences.add('env', parameter=self.env, active=os.env) + if self.log_driver is not None and self.log_driver != os.log_driver: + differences.add('log_driver', parameter=self.log_driver, active=os.log_driver) + if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}): + differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options) + if self.mode != os.mode: + needs_rebuild = True + differences.add('mode', parameter=self.mode, active=os.mode) + if has_list_changed(self.mounts, os.mounts, sort_key='target'): + differences.add('mounts', parameter=self.mounts, active=os.mounts) + if has_list_changed(self.configs, os.configs, sort_key='config_name'): + differences.add('configs', parameter=self.configs, active=os.configs) + if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'): + differences.add('secrets', parameter=self.secrets, active=os.secrets) + if have_networks_changed(self.networks, os.networks): + differences.add('networks', parameter=self.networks, active=os.networks) + needs_rebuild = not self.can_update_networks + if self.replicas != os.replicas: + differences.add('replicas', parameter=self.replicas, active=os.replicas) + if has_list_changed(self.command, os.command, sort_lists=False): + differences.add('command', parameter=self.command, active=os.command) + if has_list_changed(self.args, os.args, sort_lists=False): + differences.add('args', parameter=self.args, active=os.args) + if has_list_changed(self.constraints, os.constraints): + differences.add('constraints', parameter=self.constraints, active=os.constraints) + if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False): + differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences) + if has_list_changed(self.groups, os.groups): + differences.add('groups', parameter=self.groups, active=os.groups) + if self.labels is not None and self.labels != (os.labels or {}): + differences.add('labels', parameter=self.labels, active=os.labels) + if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu: + differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu) + if self.limit_memory is not None and self.limit_memory != os.limit_memory: + differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory) + if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu: + differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu) + if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory: + differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory) + if self.container_labels is not None and self.container_labels != (os.container_labels or {}): + differences.add('container_labels', parameter=self.container_labels, active=os.container_labels) + if self.stop_signal is not None and self.stop_signal != os.stop_signal: + differences.add('stop_signal', parameter=self.stop_signal, 
active=os.stop_signal) + if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period: + differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period) + if self.has_publish_changed(os.publish): + differences.add('publish', parameter=self.publish, active=os.publish) + if self.read_only is not None and self.read_only != os.read_only: + differences.add('read_only', parameter=self.read_only, active=os.read_only) + if self.restart_policy is not None and self.restart_policy != os.restart_policy: + differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy) + if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts: + differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts) + if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay: + differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay) + if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window: + differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window) + if has_dict_changed(self.rollback_config, os.rollback_config): + differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config) + if self.update_delay is not None and self.update_delay != os.update_delay: + differences.add('update_delay', parameter=self.update_delay, active=os.update_delay) + if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism: + differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism) + if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action: + differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action) + if self.update_monitor is not None and self.update_monitor != os.update_monitor: + differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor) + if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio: + differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio) + if self.update_order is not None and self.update_order != os.update_order: + differences.add('update_order', parameter=self.update_order, active=os.update_order) + has_image_changed, change = self.has_image_changed(os.image) + if has_image_changed: + differences.add('image', parameter=self.image, active=change) + if self.user and self.user != os.user: + differences.add('user', parameter=self.user, active=os.user) + if has_list_changed(self.dns, os.dns, sort_lists=False): + differences.add('dns', parameter=self.dns, active=os.dns) + if has_list_changed(self.dns_search, os.dns_search, sort_lists=False): + differences.add('dns_search', parameter=self.dns_search, active=os.dns_search) + if has_list_changed(self.dns_options, os.dns_options): + differences.add('dns_options', parameter=self.dns_options, active=os.dns_options) + if self.has_healthcheck_changed(os): + differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck) + if self.hostname is not None and self.hostname != os.hostname: + differences.add('hostname', parameter=self.hostname, 
active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+ def has_healthcheck_changed(self, old_service):
+ if self.healthcheck_disabled is False and self.healthcheck is None:
+ return False
+ if self.healthcheck_disabled:
+ if old_service.healthcheck is None:
+ return False
+ if old_service.healthcheck.get('test') == ['NONE']:
+ return False
+ return self.healthcheck != old_service.healthcheck
+
+ def has_publish_changed(self, old_publish):
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+ publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
+ def has_image_changed(self, old_image):
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if
filename: + secret_args['filename'] = filename + uid = secret_config.get('uid') + if uid: + secret_args['uid'] = uid + gid = secret_config.get('gid') + if gid: + secret_args['gid'] = gid + mode = secret_config.get('mode') + if mode: + secret_args['mode'] = mode + + secrets.append(types.SecretReference(**secret_args)) + + dns_config_args = {} + if self.dns is not None: + dns_config_args['nameservers'] = self.dns + if self.dns_search is not None: + dns_config_args['search'] = self.dns_search + if self.dns_options is not None: + dns_config_args['options'] = self.dns_options + dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None + + container_spec_args = {} + if self.command is not None: + container_spec_args['command'] = self.command + if self.args is not None: + container_spec_args['args'] = self.args + if self.env is not None: + container_spec_args['env'] = self.env + if self.user is not None: + container_spec_args['user'] = self.user + if self.container_labels is not None: + container_spec_args['labels'] = self.container_labels + if self.healthcheck is not None: + container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck) + elif self.healthcheck_disabled: + container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE']) + if self.hostname is not None: + container_spec_args['hostname'] = self.hostname + if self.hosts is not None: + container_spec_args['hosts'] = self.hosts + if self.read_only is not None: + container_spec_args['read_only'] = self.read_only + if self.stop_grace_period is not None: + container_spec_args['stop_grace_period'] = self.stop_grace_period + if self.stop_signal is not None: + container_spec_args['stop_signal'] = self.stop_signal + if self.tty is not None: + container_spec_args['tty'] = self.tty + if self.groups is not None: + container_spec_args['groups'] = self.groups + if self.working_dir is not None: + container_spec_args['workdir'] = self.working_dir + if secrets is not None: + container_spec_args['secrets'] = secrets + if mounts is not None: + container_spec_args['mounts'] = mounts + if dns_config is not None: + container_spec_args['dns_config'] = dns_config + if configs is not None: + container_spec_args['configs'] = configs + if self.init is not None: + container_spec_args['init'] = self.init + + return types.ContainerSpec(self.image, **container_spec_args) + + def build_placement(self): + placement_args = {} + if self.constraints is not None: + placement_args['constraints'] = self.constraints + if self.placement_preferences is not None: + placement_args['preferences'] = [ + {key.title(): {'SpreadDescriptor': value}} + for preference in self.placement_preferences + for key, value in preference.items() + ] + return types.Placement(**placement_args) if placement_args else None + + def build_update_config(self): + update_config_args = {} + if self.update_parallelism is not None: + update_config_args['parallelism'] = self.update_parallelism + if self.update_delay is not None: + update_config_args['delay'] = self.update_delay + if self.update_failure_action is not None: + update_config_args['failure_action'] = self.update_failure_action + if self.update_monitor is not None: + update_config_args['monitor'] = self.update_monitor + if self.update_max_failure_ratio is not None: + update_config_args['max_failure_ratio'] = self.update_max_failure_ratio + if self.update_order is not None: + update_config_args['order'] = self.update_order + return types.UpdateConfig(**update_config_args) if update_config_args else None + + def 
build_log_driver(self): + log_driver_args = {} + if self.log_driver is not None: + log_driver_args['name'] = self.log_driver + if self.log_driver_options is not None: + log_driver_args['options'] = self.log_driver_options + return types.DriverConfig(**log_driver_args) if log_driver_args else None + + def build_restart_policy(self): + restart_policy_args = {} + if self.restart_policy is not None: + restart_policy_args['condition'] = self.restart_policy + if self.restart_policy_delay is not None: + restart_policy_args['delay'] = self.restart_policy_delay + if self.restart_policy_attempts is not None: + restart_policy_args['max_attempts'] = self.restart_policy_attempts + if self.restart_policy_window is not None: + restart_policy_args['window'] = self.restart_policy_window + return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None + + def build_rollback_config(self): + if self.rollback_config is None: + return None + rollback_config_options = [ + 'parallelism', + 'delay', + 'failure_action', + 'monitor', + 'max_failure_ratio', + 'order', + ] + rollback_config_args = {} + for option in rollback_config_options: + value = self.rollback_config.get(option) + if value is not None: + rollback_config_args[option] = value + return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None + + def build_resources(self): + resources_args = {} + if self.limit_cpu is not None: + resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0) + if self.limit_memory is not None: + resources_args['mem_limit'] = self.limit_memory + if self.reserve_cpu is not None: + resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0) + if self.reserve_memory is not None: + resources_args['mem_reservation'] = self.reserve_memory + return types.Resources(**resources_args) if resources_args else None + + def build_task_template(self, container_spec, placement=None): + log_driver = self.build_log_driver() + restart_policy = self.build_restart_policy() + resources = self.build_resources() + + task_template_args = {} + if placement is not None: + task_template_args['placement'] = placement + if log_driver is not None: + task_template_args['log_driver'] = log_driver + if restart_policy is not None: + task_template_args['restart_policy'] = restart_policy + if resources is not None: + task_template_args['resources'] = resources + if self.force_update: + task_template_args['force_update'] = self.force_update + if self.can_use_task_template_networks: + networks = self.build_networks() + if networks: + task_template_args['networks'] = networks + return types.TaskTemplate(container_spec=container_spec, **task_template_args) + + def build_service_mode(self): + if self.mode == 'global': + self.replicas = None + return types.ServiceMode(self.mode, replicas=self.replicas) + + def build_networks(self): + networks = None + if self.networks is not None: + networks = [] + for network in self.networks: + docker_network = {'Target': network['id']} + if 'aliases' in network: + docker_network['Aliases'] = network['aliases'] + if 'options' in network: + docker_network['DriverOpts'] = network['options'] + networks.append(docker_network) + return networks + + def build_endpoint_spec(self): + endpoint_spec_args = {} + if self.publish is not None: + ports = [] + for port in self.publish: + port_spec = { + 'Protocol': port['protocol'], + 'PublishedPort': port['published_port'], + 'TargetPort': port['target_port'] + } + if port.get('mode'): + port_spec['PublishMode'] = port['mode'] + 
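# PublishMode is only included when a mode was explicitly requested; it requires API version >= 1.32. +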
ports.append(port_spec) + endpoint_spec_args['ports'] = ports + if self.endpoint_mode is not None: + endpoint_spec_args['mode'] = self.endpoint_mode + return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None + + def build_docker_service(self): + container_spec = self.build_container_spec() + placement = self.build_placement() + task_template = self.build_task_template(container_spec, placement) + + update_config = self.build_update_config() + rollback_config = self.build_rollback_config() + service_mode = self.build_service_mode() + endpoint_spec = self.build_endpoint_spec() + + service = {'task_template': task_template, 'mode': service_mode} + if update_config: + service['update_config'] = update_config + if rollback_config: + service['rollback_config'] = rollback_config + if endpoint_spec: + service['endpoint_spec'] = endpoint_spec + if self.labels: + service['labels'] = self.labels + if not self.can_use_task_template_networks: + networks = self.build_networks() + if networks: + service['networks'] = networks + return service + + +class DockerServiceManager(object): + + def __init__(self, client): + self.client = client + self.retries = 2 + self.diff_tracker = None + + def get_service(self, name): + try: + raw_data = self.client.inspect_service(name) + except NotFound: + return None + ds = DockerService(self.client.docker_api_version, self.client.docker_py_version) + + task_template_data = raw_data['Spec']['TaskTemplate'] + ds.image = task_template_data['ContainerSpec']['Image'] + ds.user = task_template_data['ContainerSpec'].get('User') + ds.env = task_template_data['ContainerSpec'].get('Env') + ds.command = task_template_data['ContainerSpec'].get('Command') + ds.args = task_template_data['ContainerSpec'].get('Args') + ds.groups = task_template_data['ContainerSpec'].get('Groups') + ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod') + ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal') + ds.working_dir = task_template_data['ContainerSpec'].get('Dir') + ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly') + + healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck') + if healthcheck_data: + options = { + 'Test': 'test', + 'Interval': 'interval', + 'Timeout': 'timeout', + 'StartPeriod': 'start_period', + 'Retries': 'retries' + } + healthcheck = dict( + (options[key], value) for key, value in healthcheck_data.items() + if value is not None and key in options + ) + ds.healthcheck = healthcheck + + update_config_data = raw_data['Spec'].get('UpdateConfig') + if update_config_data: + ds.update_delay = update_config_data.get('Delay') + ds.update_parallelism = update_config_data.get('Parallelism') + ds.update_failure_action = update_config_data.get('FailureAction') + ds.update_monitor = update_config_data.get('Monitor') + ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio') + ds.update_order = update_config_data.get('Order') + + rollback_config_data = raw_data['Spec'].get('RollbackConfig') + if rollback_config_data: + ds.rollback_config = { + 'parallelism': rollback_config_data.get('Parallelism'), + 'delay': rollback_config_data.get('Delay'), + 'failure_action': rollback_config_data.get('FailureAction'), + 'monitor': rollback_config_data.get('Monitor'), + 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'), + 'order': rollback_config_data.get('Order'), + } + + dns_config = task_template_data['ContainerSpec'].get('DNSConfig') + if dns_config: + ds.dns = 
dns_config.get('Nameservers') + ds.dns_search = dns_config.get('Search') + ds.dns_options = dns_config.get('Options') + + ds.hostname = task_template_data['ContainerSpec'].get('Hostname') + + hosts = task_template_data['ContainerSpec'].get('Hosts') + if hosts: + hosts = [ + list(reversed(host.split(":", 1))) + if ":" in host + else host.split(" ", 1) + for host in hosts + ] + ds.hosts = dict((hostname, ip) for ip, hostname in hosts) + ds.tty = task_template_data['ContainerSpec'].get('TTY') + + placement = task_template_data.get('Placement') + if placement: + ds.constraints = placement.get('Constraints') + placement_preferences = [] + for preference in placement.get('Preferences', []): + placement_preferences.append( + dict( + (key.lower(), value['SpreadDescriptor']) + for key, value in preference.items() + ) + ) + ds.placement_preferences = placement_preferences or None + + restart_policy_data = task_template_data.get('RestartPolicy') + if restart_policy_data: + ds.restart_policy = restart_policy_data.get('Condition') + ds.restart_policy_delay = restart_policy_data.get('Delay') + ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts') + ds.restart_policy_window = restart_policy_data.get('Window') + + raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec') + if raw_data_endpoint_spec: + ds.endpoint_mode = raw_data_endpoint_spec.get('Mode') + raw_data_ports = raw_data_endpoint_spec.get('Ports') + if raw_data_ports: + ds.publish = [] + for port in raw_data_ports: + ds.publish.append({ + 'protocol': port['Protocol'], + 'mode': port.get('PublishMode', None), + 'published_port': int(port['PublishedPort']), + 'target_port': int(port['TargetPort']) + }) + + raw_data_limits = task_template_data.get('Resources', {}).get('Limits') + if raw_data_limits: + raw_cpu_limits = raw_data_limits.get('NanoCPUs') + if raw_cpu_limits: + ds.limit_cpu = float(raw_cpu_limits) / 1000000000 + + raw_memory_limits = raw_data_limits.get('MemoryBytes') + if raw_memory_limits: + ds.limit_memory = int(raw_memory_limits) + + raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations') + if raw_data_reservations: + raw_cpu_reservations = raw_data_reservations.get('NanoCPUs') + if raw_cpu_reservations: + ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000 + + raw_memory_reservations = raw_data_reservations.get('MemoryBytes') + if raw_memory_reservations: + ds.reserve_memory = int(raw_memory_reservations) + + ds.labels = raw_data['Spec'].get('Labels') + ds.log_driver = task_template_data.get('LogDriver', {}).get('Name') + ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options') + ds.container_labels = task_template_data['ContainerSpec'].get('Labels') + + mode = raw_data['Spec']['Mode'] + if 'Replicated' in mode.keys(): + ds.mode = to_text('replicated', encoding='utf-8') + ds.replicas = mode['Replicated']['Replicas'] + elif 'Global' in mode.keys(): + ds.mode = 'global' + else: + raise Exception('Unknown service mode: %s' % mode) + + raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts') + if raw_data_mounts: + ds.mounts = [] + for mount_data in raw_data_mounts: + bind_options = mount_data.get('BindOptions', {}) + volume_options = mount_data.get('VolumeOptions', {}) + tmpfs_options = mount_data.get('TmpfsOptions', {}) + driver_config = volume_options.get('DriverConfig', {}) + driver_config = dict( + (key.lower(), value) for key, value in driver_config.items() + ) or None + ds.mounts.append({ + 'source': mount_data.get('Source', ''), + 'type': 
mount_data['Type'], + 'target': mount_data['Target'], + 'readonly': mount_data.get('ReadOnly'), + 'propagation': bind_options.get('Propagation'), + 'no_copy': volume_options.get('NoCopy'), + 'labels': volume_options.get('Labels'), + 'driver_config': driver_config, + 'tmpfs_mode': tmpfs_options.get('Mode'), + 'tmpfs_size': tmpfs_options.get('SizeBytes'), + }) + + raw_data_configs = task_template_data['ContainerSpec'].get('Configs') + if raw_data_configs: + ds.configs = [] + for config_data in raw_data_configs: + ds.configs.append({ + 'config_id': config_data['ConfigID'], + 'config_name': config_data['ConfigName'], + 'filename': config_data['File'].get('Name'), + 'uid': config_data['File'].get('UID'), + 'gid': config_data['File'].get('GID'), + 'mode': config_data['File'].get('Mode') + }) + + raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets') + if raw_data_secrets: + ds.secrets = [] + for secret_data in raw_data_secrets: + ds.secrets.append({ + 'secret_id': secret_data['SecretID'], + 'secret_name': secret_data['SecretName'], + 'filename': secret_data['File'].get('Name'), + 'uid': secret_data['File'].get('UID'), + 'gid': secret_data['File'].get('GID'), + 'mode': secret_data['File'].get('Mode') + }) + + raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks')) + if raw_networks_data: + ds.networks = [] + for network_data in raw_networks_data: + network = {'id': network_data['Target']} + if 'Aliases' in network_data: + network['aliases'] = network_data['Aliases'] + if 'DriverOpts' in network_data: + network['options'] = network_data['DriverOpts'] + ds.networks.append(network) + ds.service_version = raw_data['Version']['Index'] + ds.service_id = raw_data['ID'] + + ds.init = task_template_data['ContainerSpec'].get('Init', False) + return ds + + def update_service(self, name, old_service, new_service): + service_data = new_service.build_docker_service() + result = self.client.update_service( + old_service.service_id, + old_service.service_version, + name=name, + **service_data + ) + # Prior to Docker SDK 4.0.0 no warnings were returned and will thus be ignored. 
+ # (see https://github.com/docker/docker-py/pull/2272) + self.client.report_warnings(result, ['Warning']) + + def create_service(self, name, service): + service_data = service.build_docker_service() + result = self.client.create_service(name=name, **service_data) + self.client.report_warnings(result, ['Warning']) + + def remove_service(self, name): + self.client.remove_service(name) + + def get_image_digest(self, name, resolve=False): + if ( + not name + or not resolve + ): + return name + repo, tag = parse_repository_tag(name) + if not tag: + tag = 'latest' + name = repo + ':' + tag + distribution_data = self.client.inspect_distribution(name) + digest = distribution_data['Descriptor']['digest'] + return '%s@%s' % (name, digest) + + def get_networks_names_ids(self): + return dict( + (network['Name'], network['Id']) for network in self.client.networks() + ) + + def get_missing_secret_ids(self): + """ + Resolve missing secret ids by looking them up by name + """ + secret_names = [ + secret['secret_name'] + for secret in self.client.module.params.get('secrets') or [] + if secret['secret_id'] is None + ] + if not secret_names: + return {} + secrets = self.client.secrets(filters={'name': secret_names}) + secrets = dict( + (secret['Spec']['Name'], secret['ID']) + for secret in secrets + if secret['Spec']['Name'] in secret_names + ) + for secret_name in secret_names: + if secret_name not in secrets: + self.client.fail( + 'Could not find a secret named "%s"' % secret_name + ) + return secrets + + def get_missing_config_ids(self): + """ + Resolve missing config ids by looking them up by name + """ + config_names = [ + config['config_name'] + for config in self.client.module.params.get('configs') or [] + if config['config_id'] is None + ] + if not config_names: + return {} + configs = self.client.configs(filters={'name': config_names}) + configs = dict( + (config['Spec']['Name'], config['ID']) + for config in configs + if config['Spec']['Name'] in config_names + ) + for config_name in config_names: + if config_name not in configs: + self.client.fail( + 'Could not find a config named "%s"' % config_name + ) + return configs + + def run(self): + self.diff_tracker = DifferenceTracker() + module = self.client.module + + image = module.params['image'] + try: + image_digest = self.get_image_digest( + name=image, + resolve=module.params['resolve_image'] + ) + except DockerException as e: + self.client.fail( + 'Error looking for an image named %s: %s' + % (image, e) + ) + + try: + current_service = self.get_service(module.params['name']) + except Exception as e: + self.client.fail( + 'Error looking for service named %s: %s' + % (module.params['name'], e) + ) + try: + secret_ids = self.get_missing_secret_ids() + config_ids = self.get_missing_config_ids() + network_ids = self.get_networks_names_ids() + new_service = DockerService.from_ansible_params( + module.params, + current_service, + image_digest, + secret_ids, + config_ids, + network_ids, + self.client.docker_api_version, + self.client.docker_py_version + ) + except Exception as e: + return self.client.fail( + 'Error parsing module parameters: %s' % e + ) + + changed = False + msg = 'noop' + rebuilt = False + differences = DifferenceTracker() + facts = {} + + if current_service: + if module.params['state'] == 'absent': + if not module.check_mode: + self.remove_service(module.params['name']) + msg = 'Service removed' + changed = True + else: + changed, differences, need_rebuild, force_update = new_service.compare( + current_service + ) + if changed: + 
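+                    # When something changed, the service is either rebuilt from
+                    # scratch (remove + create) if the difference cannot be applied
+                    # in place, or updated in place; a bare force_update without
+                    # differences is handled in the else branch below.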
self.diff_tracker.merge(differences) + if need_rebuild: + if not module.check_mode: + self.remove_service(module.params['name']) + self.create_service( + module.params['name'], + new_service + ) + msg = 'Service rebuilt' + rebuilt = True + else: + if not module.check_mode: + self.update_service( + module.params['name'], + current_service, + new_service + ) + msg = 'Service updated' + rebuilt = False + else: + if force_update: + if not module.check_mode: + self.update_service( + module.params['name'], + current_service, + new_service + ) + msg = 'Service forcefully updated' + rebuilt = False + changed = True + else: + msg = 'Service unchanged' + facts = new_service.get_facts() + else: + if module.params['state'] == 'absent': + msg = 'Service absent' + else: + if not module.check_mode: + self.create_service(module.params['name'], new_service) + msg = 'Service created' + changed = True + facts = new_service.get_facts() + + return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts + + def run_safe(self): + while True: + try: + return self.run() + except APIError as e: + # Sometimes Version.Index will have changed between an inspect and + # update. If this is encountered we'll retry the update. + if self.retries > 0 and 'update out of sequence' in str(e.explanation): + self.retries -= 1 + time.sleep(1) + else: + raise + + +def _detect_publish_mode_usage(client): + for publish_def in client.module.params['publish'] or []: + if publish_def.get('mode'): + return True + return False + + +def _detect_healthcheck_start_period(client): + if client.module.params['healthcheck']: + return client.module.params['healthcheck']['start_period'] is not None + return False + + +def _detect_mount_tmpfs_usage(client): + for mount in client.module.params['mounts'] or []: + if mount.get('type') == 'tmpfs': + return True + if mount.get('tmpfs_size') is not None: + return True + if mount.get('tmpfs_mode') is not None: + return True + return False + + +def _detect_update_config_failure_action_rollback(client): + rollback_config_failure_action = ( + (client.module.params['update_config'] or {}).get('failure_action') + ) + update_failure_action = client.module.params['update_failure_action'] + failure_action = rollback_config_failure_action or update_failure_action + return failure_action == 'rollback' + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + image=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + mounts=dict(type='list', elements='dict', options=dict( + source=dict(type='str'), + target=dict(type='str', required=True), + type=dict( + type='str', + default='bind', + choices=['bind', 'volume', 'tmpfs', 'npipe'], + ), + readonly=dict(type='bool'), + labels=dict(type='dict'), + propagation=dict( + type='str', + choices=[ + 'shared', + 'slave', + 'private', + 'rshared', + 'rslave', + 'rprivate' + ] + ), + no_copy=dict(type='bool'), + driver_config=dict(type='dict', options=dict( + name=dict(type='str'), + options=dict(type='dict') + )), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='int') + )), + configs=dict(type='list', elements='dict', options=dict( + config_id=dict(type='str'), + config_name=dict(type='str', required=True), + filename=dict(type='str'), + uid=dict(type='str'), + gid=dict(type='str'), + mode=dict(type='int'), + )), + secrets=dict(type='list', elements='dict', options=dict( + secret_id=dict(type='str'), + secret_name=dict(type='str', required=True), + filename=dict(type='str'), + 
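+            # uid and gid are strings rather than integers: they map to the
+            # string-valued File.UID/File.GID fields of the Docker API spec.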
uid=dict(type='str'), + gid=dict(type='str'), + mode=dict(type='int'), + )), + networks=dict(type='list', elements='raw'), + command=dict(type='raw'), + args=dict(type='list', elements='str'), + env=dict(type='raw'), + env_files=dict(type='list', elements='path'), + force_update=dict(type='bool', default=False), + groups=dict(type='list', elements='str'), + logging=dict(type='dict', options=dict( + driver=dict(type='str'), + options=dict(type='dict'), + )), + publish=dict(type='list', elements='dict', options=dict( + published_port=dict(type='int', required=True), + target_port=dict(type='int', required=True), + protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']), + mode=dict(type='str', choices=['ingress', 'host']), + )), + placement=dict(type='dict', options=dict( + constraints=dict(type='list', elements='str'), + preferences=dict(type='list', elements='dict'), + )), + tty=dict(type='bool'), + dns=dict(type='list', elements='str'), + dns_search=dict(type='list', elements='str'), + dns_options=dict(type='list', elements='str'), + healthcheck=dict(type='dict', options=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )), + hostname=dict(type='str'), + hosts=dict(type='dict'), + labels=dict(type='dict'), + container_labels=dict(type='dict'), + mode=dict( + type='str', + default='replicated', + choices=['replicated', 'global'] + ), + replicas=dict(type='int', default=-1), + endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']), + stop_grace_period=dict(type='str'), + stop_signal=dict(type='str'), + limits=dict(type='dict', options=dict( + cpus=dict(type='float'), + memory=dict(type='str'), + )), + read_only=dict(type='bool'), + reservations=dict(type='dict', options=dict( + cpus=dict(type='float'), + memory=dict(type='str'), + )), + resolve_image=dict(type='bool', default=False), + restart_config=dict(type='dict', options=dict( + condition=dict(type='str', choices=['none', 'on-failure', 'any']), + delay=dict(type='str'), + max_attempts=dict(type='int'), + window=dict(type='str'), + )), + rollback_config=dict(type='dict', options=dict( + parallelism=dict(type='int'), + delay=dict(type='str'), + failure_action=dict( + type='str', + choices=['continue', 'pause'] + ), + monitor=dict(type='str'), + max_failure_ratio=dict(type='float'), + order=dict(type='str'), + )), + update_config=dict(type='dict', options=dict( + parallelism=dict(type='int'), + delay=dict(type='str'), + failure_action=dict( + type='str', + choices=['continue', 'pause', 'rollback'] + ), + monitor=dict(type='str'), + max_failure_ratio=dict(type='float'), + order=dict(type='str'), + )), + user=dict(type='str'), + working_dir=dict(type='str'), + init=dict(type='bool'), + ) + + option_minimal_versions = dict( + dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'), + force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'), + healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'), + hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'), + 
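+        # Each plain entry declares the minimal Docker SDK for Python and
+        # Docker API versions required to use an option; the "specials" further
+        # below additionally carry detect_usage callbacks, so their requirement
+        # is only enforced when the corresponding suboption is actually set.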
secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'), + configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'), + publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'), + read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'), + resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'), + rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'), + init=dict(docker_py_version='4.0.0', docker_api_version='1.37'), + # specials + publish_mode=dict( + docker_py_version='3.0.0', + docker_api_version='1.25', + detect_usage=_detect_publish_mode_usage, + usage_msg='set publish.mode' + ), + healthcheck_start_period=dict( + docker_py_version='2.6.0', + docker_api_version='1.29', + detect_usage=_detect_healthcheck_start_period, + usage_msg='set healthcheck.start_period' + ), + update_config_max_failure_ratio=dict( + docker_py_version='2.1.0', + docker_api_version='1.25', + detect_usage=lambda c: (c.module.params['update_config'] or {}).get( + 'max_failure_ratio' + ) is not None, + usage_msg='set update_config.max_failure_ratio' + ), + update_config_failure_action=dict( + docker_py_version='3.5.0', + docker_api_version='1.28', + detect_usage=_detect_update_config_failure_action_rollback, + usage_msg='set update_config.failure_action.rollback' + ), + update_config_monitor=dict( + docker_py_version='2.1.0', + docker_api_version='1.25', + detect_usage=lambda c: (c.module.params['update_config'] or {}).get( + 'monitor' + ) is not None, + usage_msg='set update_config.monitor' + ), + update_config_order=dict( + docker_py_version='2.7.0', + docker_api_version='1.29', + detect_usage=lambda c: (c.module.params['update_config'] or {}).get( + 'order' + ) is not None, + usage_msg='set update_config.order' + ), + placement_config_preferences=dict( + docker_py_version='2.4.0', + docker_api_version='1.27', + detect_usage=lambda c: (c.module.params['placement'] or {}).get( + 'preferences' + ) is not None, + usage_msg='set placement.preferences' + ), + placement_config_constraints=dict( + docker_py_version='2.4.0', + detect_usage=lambda c: (c.module.params['placement'] or {}).get( + 'constraints' + ) is not None, + usage_msg='set placement.constraints' + ), + mounts_tmpfs=dict( + docker_py_version='2.6.0', + detect_usage=_detect_mount_tmpfs_usage, + usage_msg='set mounts.tmpfs' + ), + rollback_config_order=dict( + docker_api_version='1.29', + detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get( + 'order' + ) is not None, + usage_msg='set rollback_config.order' + ), + ) + required_if = [ + ('state', 'present', ['image']) + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + min_docker_version='2.0.2', + min_docker_api_version='1.24', + option_minimal_versions=option_minimal_versions, + ) + + try: + dsm = DockerServiceManager(client) + msg, changed, rebuilt, changes, facts = dsm.run_safe() + + results = dict( + msg=msg, + changed=changed, + rebuilt=rebuilt, + changes=changes, + swarm_service=facts, + ) + if client.module._diff: + before, after = dsm.diff_tracker.get_before_after() + results['diff'] = dict(before=before, after=after) + + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error 
occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py new file mode 100644 index 00000000..b1b25caa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py @@ -0,0 +1,115 @@
+#!/usr/bin/python
+#
+# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+  - Retrieves information about a docker service.
+  - Essentially returns the output of C(docker service inspect <name>).
+  - Must be executed on a host running as a Swarm Manager, otherwise the module will fail.
+
+
+options:
+  name:
+    description:
+      - The name of the service to inspect.
+    type: str
+    required: yes
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+  - Hannes Ljungberg (@hannseman)
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+  - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+  community.docker.docker_swarm_service_info:
+    name: myservice
+  register: result
+'''
+
+RETURN = '''
+exists:
+  description:
+    - Returns whether the service exists.
+  type: bool
+  returned: always
+  sample: true
+service:
+  description:
+    - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+    - Will be C(none) if the service does not exist.
+  returned: always
+  type: dict
+'''
+
+import traceback
+
+try:
+    from docker.errors import DockerException
+except ImportError:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+    RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+    service = client.module.params['name']
+    return client.get_service_inspect(
+        service_id=service,
+        skip_missing=True
+    )
+
+
+def main():
+    argument_spec = dict(
+        name=dict(type='str', required=True),
+    )
+
+    client = AnsibleDockerSwarmClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='2.0.0',
+        min_docker_api_version='1.24',
+    )
+
+    client.fail_task_if_not_swarm_manager()
+
+    try:
+        service = get_service_info(client)
+
+        client.module.exit_json(
+            changed=False,
+            service=service,
+            exists=bool(service)
+        )
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py new file mode 100644 index 00000000..e504a591 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py @@ -0,0 +1,308 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+  - Create/remove Docker volumes.
+  - Performs largely the same function as the "docker volume" CLI subcommand.
+options:
+  volume_name:
+    description:
+      - Name of the volume to operate on.
+    type: str
+    required: yes
+    aliases:
+      - name
+
+  driver:
+    description:
+      - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+    type: str
+    default: local
+
+  driver_options:
+    description:
+      - "Dictionary of volume settings. Consult docker docs for valid options and values:
+        U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
+    type: dict
+
+  labels:
+    description:
+      - Dictionary of label key/values to set for the volume.
+    type: dict
+
+  recreate:
+    description:
+      - Controls when a volume will be recreated when I(state) is C(present). Please
+        note that recreating an existing volume will cause **any data in the existing volume
+        to be lost!** The volume will be deleted and a new volume with the same name will be
+        created.
+      - The value C(always) forces the volume to always be recreated.
+      - The value C(never) makes sure the volume will not be recreated.
+      - The value C(options-changed) makes sure the volume will be recreated if the volume
+        already exists and the driver, driver options or labels differ.
+ type: str + default: never + choices: + - always + - never + - options-changed + + state: + description: + - C(absent) deletes the volume. + - C(present) creates the volume, if it does not already exist. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: +- community.docker.docker +- community.docker.docker.docker_py_1_documentation + + +author: + - Alex Grönholm (@agronholm) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "The docker server >= 1.9.0" +''' + +EXAMPLES = ''' +- name: Create a volume + community.docker.docker_volume: + name: volume_one + +- name: Remove a volume + community.docker.docker_volume: + name: volume_one + state: absent + +- name: Create a volume with options + community.docker.docker_volume: + name: volume_two + driver_options: + type: btrfs + device: /dev/sda2 +''' + +RETURN = ''' +volume: + description: + - Volume inspection results for the affected volume. + returned: success + type: dict + sample: {} +''' + +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + DockerBaseClass, + AnsibleDockerClient, + DifferenceTracker, + RequestException, +) +from ansible.module_utils.six import iteritems, text_type + + +class TaskParameters(DockerBaseClass): + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.volume_name = None + self.driver = None + self.driver_options = None + self.labels = None + self.recreate = None + self.debug = None + + for key, value in iteritems(client.module.params): + setattr(self, key, value) + + +class DockerVolumeManager(object): + + def __init__(self, client): + self.client = client + self.parameters = TaskParameters(client) + self.check_mode = self.client.check_mode + self.results = { + u'changed': False, + u'actions': [] + } + self.diff = self.client.module._diff + self.diff_tracker = DifferenceTracker() + self.diff_result = dict() + + self.existing_volume = self.get_existing_volume() + + state = self.parameters.state + if state == 'present': + self.present() + elif state == 'absent': + self.absent() + + if self.diff or self.check_mode or self.parameters.debug: + if self.diff: + self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after() + self.results['diff'] = self.diff_result + + def get_existing_volume(self): + try: + volumes = self.client.volumes() + except APIError as e: + self.client.fail(text_type(e)) + + if volumes[u'Volumes'] is None: + return None + + for volume in volumes[u'Volumes']: + if volume['Name'] == self.parameters.volume_name: + return volume + + return None + + def has_different_config(self): + """ + Return the list of differences between the current parameters and the existing volume. 
+ + :return: list of options that differ + """ + differences = DifferenceTracker() + if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']: + differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver']) + if self.parameters.driver_options: + if not self.existing_volume.get('Options'): + differences.add('driver_options', + parameter=self.parameters.driver_options, + active=self.existing_volume.get('Options')) + else: + for key, value in iteritems(self.parameters.driver_options): + if (not self.existing_volume['Options'].get(key) or + value != self.existing_volume['Options'][key]): + differences.add('driver_options.%s' % key, + parameter=value, + active=self.existing_volume['Options'].get(key)) + if self.parameters.labels: + existing_labels = self.existing_volume.get('Labels', {}) + for label in self.parameters.labels: + if existing_labels.get(label) != self.parameters.labels.get(label): + differences.add('labels.%s' % label, + parameter=self.parameters.labels.get(label), + active=existing_labels.get(label)) + + return differences + + def create_volume(self): + if not self.existing_volume: + if not self.check_mode: + try: + params = dict( + driver=self.parameters.driver, + driver_opts=self.parameters.driver_options, + ) + + if self.parameters.labels is not None: + params['labels'] = self.parameters.labels + + resp = self.client.create_volume(self.parameters.volume_name, **params) + self.existing_volume = self.client.inspect_volume(resp['Name']) + except APIError as e: + self.client.fail(text_type(e)) + + self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver)) + self.results['changed'] = True + + def remove_volume(self): + if self.existing_volume: + if not self.check_mode: + try: + self.client.remove_volume(self.parameters.volume_name) + except APIError as e: + self.client.fail(text_type(e)) + + self.results['actions'].append("Removed volume %s" % self.parameters.volume_name) + self.results['changed'] = True + + def present(self): + differences = DifferenceTracker() + if self.existing_volume: + differences = self.has_different_config() + + self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None) + if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always': + self.remove_volume() + self.existing_volume = None + + self.create_volume() + + if self.diff or self.check_mode or self.parameters.debug: + self.diff_result['differences'] = differences.get_legacy_docker_diffs() + self.diff_tracker.merge(differences) + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + volume_facts = self.get_existing_volume() + self.results['volume'] = volume_facts + + def absent(self): + self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None) + self.remove_volume() + + +def main(): + argument_spec = dict( + volume_name=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + driver=dict(type='str', default='local'), + driver_options=dict(type='dict', default={}), + labels=dict(type='dict'), + recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']), + debug=dict(type='bool', default=False) + ) + + option_minimal_versions = dict( + labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'), + ) + + client = 
AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='1.10.0',
+        min_docker_api_version='1.21',
+        # "The docker server >= 1.9.0"
+        option_minimal_versions=option_minimal_versions,
+    )
+
+    try:
+        cm = DockerVolumeManager(client)
+        client.module.exit_json(**cm.results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py new file mode 100644 index 00000000..8e1734c2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py @@ -0,0 +1,128 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+  - Performs largely the same function as the "docker volume inspect" CLI subcommand.
+options:
+  name:
+    description:
+      - Name of the volume to inspect.
+    type: str
+    required: yes
+    aliases:
+      - volume_name
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+  - Felix Fontein (@felixfontein)
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on volume
+  community.docker.docker_volume_info:
+    name: mydata
+  register: result
+
+- name: Does volume exist?
+  ansible.builtin.debug:
+    msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+  ansible.builtin.debug:
+    var: result.volume
+  when: result.exists
+'''
+
+RETURN = '''
+exists:
+  description:
+    - Returns whether the volume exists.
+  type: bool
+  returned: always
+  sample: true
+volume:
+  description:
+    - Volume inspection results for the affected volume.
+    - Will be C(none) if the volume does not exist.
+ returned: success + type: dict + sample: '{ + "CreatedAt": "2018-12-09T17:43:44+01:00", + "Driver": "local", + "Labels": null, + "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data", + "Name": "ansible-test-bd3f6172", + "Options": {}, + "Scope": "local" + }' +''' + +import traceback + +try: + from docker.errors import DockerException, NotFound +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClient, + RequestException, +) + + +def get_existing_volume(client, volume_name): + try: + return client.inspect_volume(volume_name) + except NotFound as dummy: + return None + except Exception as exc: + client.fail("Error inspecting volume: %s" % exc) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True, aliases=['volume_name']), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='1.8.0', + min_docker_api_version='1.21', + ) + + try: + volume = get_existing_volume(client, client.module.params['name']) + + client.module.exit_json( + changed=False, + exists=(True if volume else False), + volume=volume, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py new file mode 100644 index 00000000..4742367e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py @@ -0,0 +1,33 @@ +# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible.errors import AnsibleConnectionFailure + + +from ansible_collections.community.docker.plugins.module_utils.common import ( + AnsibleDockerClientBase, + DOCKER_COMMON_ARGS, +) + + +class AnsibleDockerClient(AnsibleDockerClientBase): + def __init__(self, plugin, min_docker_version=None, min_docker_api_version=None): + self.plugin = plugin + super(AnsibleDockerClient, self).__init__( + min_docker_version=min_docker_version, + min_docker_api_version=min_docker_api_version) + + def fail(self, msg, **kwargs): + if kwargs: + msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items()) + raise AnsibleConnectionFailure(msg) + + def _get_params(self): + return dict([ + (option, self.plugin.get_option(option)) + for option in DOCKER_COMMON_ARGS + ]) diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py new file mode 100644 index 00000000..bbaa1565 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py @@ -0,0 +1,221 @@ +# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de> +# GNU General Public License 
v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import fcntl
+import os
+import os.path
+import socket as pysocket
+
+from ansible.compat import selectors
+from ansible.module_utils.six import PY3
+
+try:
+    from docker.utils import socket as docker_socket
+    import struct
+except Exception:
+    # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+    pass
+
+
+PARAMIKO_POLL_TIMEOUT = 0.01  # 10 milliseconds
+
+
+class DockerSocketHandler:
+    def __init__(self, display, sock, container=None):
+        if hasattr(sock, '_sock'):
+            sock._sock.setblocking(0)
+        elif hasattr(sock, 'setblocking'):
+            sock.setblocking(0)
+        else:
+            fcntl.fcntl(sock.fileno(), fcntl.F_SETFL, fcntl.fcntl(sock.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+
+        self._display = display
+        self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))
+
+        self._container = container
+
+        self._sock = sock
+        self._block_done_callback = None
+        self._block_buffer = []
+        self._eof = False
+        self._read_buffer = b''
+        self._write_buffer = b''
+        self._end_of_writing = False
+
+        self._current_stream = None
+        self._current_missing = 0
+        self._current_buffer = b''
+
+        self._selector = selectors.DefaultSelector()
+        self._selector.register(self._sock, selectors.EVENT_READ)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, tb):
+        self._selector.close()
+
+    def set_block_done_callback(self, block_done_callback):
+        self._block_done_callback = block_done_callback
+        if self._block_done_callback is not None:
+            # deliver any blocks that were buffered before the callback was set
+            while self._block_buffer:
+                elt = self._block_buffer.pop(0)
+                self._block_done_callback(*elt)
+
+    def _add_block(self, stream_id, data):
+        if self._block_done_callback is not None:
+            self._block_done_callback(stream_id, data)
+        else:
+            self._block_buffer.append((stream_id, data))
+
+    def _read(self):
+        if self._eof:
+            return
+        if hasattr(self._sock, 'recv'):
+            try:
+                data = self._sock.recv(262144)
+            except Exception as e:
+                # After calling self._sock.shutdown(), OpenSSL's/urllib3's
+                # WrappedSocket seems to eventually raise ZeroReturnError in
+                # case of EOF
+                if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
+                    self._eof = True
+                    return
+                else:
+                    raise
+        elif PY3 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
+            data = self._sock.read()
+        else:
+            # plain file descriptor; os.read() needs an explicit maximum byte count
+            data = os.read(self._sock.fileno(), 262144)
+        if data is None:
+            # no data available
+            return
+        self._display.vvvv('read {0} bytes'.format(len(data)), host=self._container)
+        if len(data) == 0:
+            # Stream EOF
+            self._eof = True
+            return
+        self._read_buffer += data
+        while len(self._read_buffer) > 0:
+            if self._current_missing > 0:
+                n = min(len(self._read_buffer), self._current_missing)
+                self._current_buffer += self._read_buffer[:n]
+                self._read_buffer = self._read_buffer[n:]
+                self._current_missing -= n
+                if self._current_missing == 0:
+                    self._add_block(self._current_stream, self._current_buffer)
+                    self._current_buffer = b''
+            if len(self._read_buffer) < 8:
+                break
+            self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
+            self._read_buffer = self._read_buffer[8:]
+            if self._current_missing < 0:
+                # Stream EOF (as reported by docker daemon)
+                self._eof = True
+                break
+
+    def _handle_end_of_writing(self):
+        if self._end_of_writing and len(self._write_buffer) == 0:
+            self._end_of_writing = False
+            self._display.vvvv('Shutting socket down for
writing', host=self._container) + if hasattr(self._sock, 'shutdown_write'): + self._sock.shutdown_write() + elif hasattr(self._sock, 'shutdown'): + try: + self._sock.shutdown(pysocket.SHUT_WR) + except TypeError as e: + # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given" + self._display.vvvv('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e), host=self._container) + self._sock.shutdown() + elif PY3 and isinstance(self._sock, getattr(pysocket, 'SocketIO')): + self._sock._sock.shutdown(pysocket.SHUT_WR) + else: + self._display.vvvv('No idea how to signal end of writing', host=self._container) + + def _write(self): + if len(self._write_buffer) > 0: + if hasattr(self._sock, '_send_until_done'): + # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but + # only `sendall`, which uses `_send_until_done` under the hood. + written = self._sock._send_until_done(self._write_buffer) + elif hasattr(self._sock, 'send'): + written = self._sock.send(self._write_buffer) + else: + written = os.write(self._sock.fileno(), self._write_buffer) + self._write_buffer = self._write_buffer[written:] + self._display.vvvv('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)), host=self._container) + if len(self._write_buffer) > 0: + self._selector.modify(self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE) + else: + self._selector.modify(self._sock, selectors.EVENT_READ) + self._handle_end_of_writing() + + def select(self, timeout=None, _internal_recursion=False): + if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0: + # When the SSH transport is used, docker-py internally uses Paramiko, whose + # Channel object supports select(), but only for reading + # (https://github.com/paramiko/paramiko/issues/695). + if self._sock.send_ready(): + self._write() + return True + while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT: + result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True) + if self._sock.send_ready(): + self._read() + result += 1 + if result > 0: + return True + if timeout is not None: + timeout -= PARAMIKO_POLL_TIMEOUT + self._display.vvvv('select... 
({0})'.format(timeout), host=self._container)
+        events = self._selector.select(timeout)
+        for key, event in events:
+            if key.fileobj == self._sock:
+                self._display.vvvv(
+                    'select event read:{0} write:{1}'.format(event & selectors.EVENT_READ != 0, event & selectors.EVENT_WRITE != 0),
+                    host=self._container)
+                if event & selectors.EVENT_READ != 0:
+                    self._read()
+                if event & selectors.EVENT_WRITE != 0:
+                    self._write()
+        result = len(events)
+        if self._paramiko_read_workaround and len(self._write_buffer) > 0:
+            if self._sock.send_ready():
+                self._write()
+                result += 1
+        return result > 0
+
+    def is_eof(self):
+        return self._eof
+
+    def end_of_writing(self):
+        self._end_of_writing = True
+        self._handle_end_of_writing()
+
+    def consume(self):
+        stdout = []
+        stderr = []
+
+        def append_block(stream_id, data):
+            if stream_id == docker_socket.STDOUT:
+                stdout.append(data)
+            elif stream_id == docker_socket.STDERR:
+                stderr.append(data)
+            else:
+                raise ValueError('{0} is not a valid stream ID'.format(stream_id))
+
+        self.end_of_writing()
+
+        self.set_block_done_callback(append_block)
+        while not self._eof:
+            self.select()
+        return b''.join(stdout), b''.join(stderr)
+
+    def write(self, data):
+        self._write_buffer += data
+        if len(self._write_buffer) == len(data):
+            # the buffer was empty before; kick off the first write
+            self._write() diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/aliases @@ -0,0 +1 @@
+hidden diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/test.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/test.sh new file mode 100755 index 00000000..4e7aa8dd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/test.sh @@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+# Run connection tests with both the default and C locale.
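+# A minimal sketch of what ${INVENTORY} could contain (hypothetical names;
+# the real inventories are generated by the connection_* targets' runme.sh):
+#
+#   [docker]
+#   docker-pipelining ansible_pipelining=true
+#
+#   [docker:vars]
+#   ansible_connection=community.docker.docker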
+
+ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml new file mode 100644 index 00000000..a662e576 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml @@ -0,0 +1,43 @@
+- hosts: "{{ target_hosts }}"
+  gather_facts: no
+  serial: 1
+  tasks:
+
+    ### raw with unicode arg and output
+
+    - name: raw with unicode arg and output
+      raw: echo 汉语
+      register: command
+    - name: check output of raw with unicode arg and output
+      assert:
+        that:
+          - "'汉语' in command.stdout"
+          - command is changed  # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
+
+    ### copy local file with unicode filename and content
+
+    - name: create local file with unicode filename and content
+      local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+    - name: remove remote file with unicode filename and content
+      action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+    - name: create remote directory with unicode name
+      action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+    - name: copy local file with unicode filename and content
+      action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+
+    ### fetch remote file with unicode filename and content
+
+    - name: remove local file with unicode filename and content
+      local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+    - name: fetch remote file with unicode filename and content
+      fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+
+    ### remove local and remote temp files
+
+    - name: remove local temp file
+      local_action: file path={{ local_tmp }}-汉语 state=absent
+    - name: remove remote temp file
+      action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+
+    ### test wait_for_connection plugin
+    - ansible.builtin.wait_for_connection: diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases new file mode 100644 index 00000000..8beddaed --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases @@ -0,0 +1,3 @@
+shippable/posix/group4
+skip/docker # coverage does not work if we're inside a docker container, since we cannot access this container's /tmp dir from the new container
+destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml @@ -0,0 +1,3 @@
+---
+dependencies:
+  - setup_docker diff --git 
a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh new file mode 100755 index 00000000..d3976ff3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux + +# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir. +# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix. + +group=$(python -c \ + "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))") + +cd ../connection + +INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \ + -e target_hosts="${group}" \ + -e action_prefix= \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=/tmp/ansible-remote \ + "$@" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh new file mode 100755 index 00000000..9b6a93d4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# If you use another image, you possibly also need to adjust +# ansible_python_interpreter in test_connection.inventory. +source ../setup_docker/vars/main.env +IMAGE="${DOCKER_TEST_IMAGE_PYTHON3}" + +# Setup phase + +echo "Setup" +ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml + +# If docker wasn't installed, don't run the tests +if [ "$(command -v docker)" == "" ]; then + exit +fi + + +# Test phase + +CONTAINER_SUFFIX=-${RANDOM} + +DOCKER_CONTAINERS="docker-connection-test-container${CONTAINER_SUFFIX}" + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +cleanup() { + echo "Cleanup" + docker rm -f ${DOCKER_CONTAINERS} + echo "Shutdown" + ANSIBLE_ROLES_PATH=.. 
ansible-playbook shutdown.yml + echo "Done" + exit 0 +} + +trap cleanup INT TERM EXIT + +echo "Start containers" +for CONTAINER in ${DOCKER_CONTAINERS}; do + if [ "${ANSIBLE_TEST_COVERAGE:-}" == "" ]; then + docker run --rm --name ${CONTAINER} --detach "${IMAGE}" /bin/sh -c 'sleep 10m' + else + docker run --rm --name ${CONTAINER} --detach -v /tmp:/tmp "${IMAGE}" /bin/sh -c 'sleep 10m' + docker exec ${CONTAINER} pip3 install coverage + fi + echo ${CONTAINER} +done + +cat > test_connection.inventory << EOF +[docker] +docker-no-pipelining ansible_pipelining=false +docker-pipelining ansible_pipelining=true + +[docker:vars] +ansible_host=docker-connection-test-container${CONTAINER_SUFFIX} +ansible_connection=community.docker.docker +ansible_python_interpreter=/usr/local/bin/python3 +EOF + +echo "Run tests" +./runme-connection.sh "$@" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml new file mode 100644 index 00000000..a709d2c7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + vars: + docker_skip_cleanup: yes + + tasks: + - name: Setup docker + import_role: + name: setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml new file mode 100644 index 00000000..e01d1210 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml @@ -0,0 +1,15 @@ +--- +- hosts: localhost + connection: local + vars: + docker_skip_cleanup: yes + + tasks: + - name: Remove docker packages + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: + - docker + - docker-ce + - docker-ce-cli + state: absent diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases new file mode 100644 index 00000000..8beddaed --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases @@ -0,0 +1,3 @@ +shippable/posix/group4 +skip/docker # coverage does not work if we're inside a docker container, since we cannot access this container's /tmp dir from the new container +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh new file mode 100755 index 00000000..d3976ff3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux + +# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir. +# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix. + +group=$(python -c \ + "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))") + +cd ../connection + +INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \ + -e target_hosts="${group}" \ + -e action_prefix= \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=/tmp/ansible-remote \ + "$@" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh new file mode 100755 index 00000000..8e486ac9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# If you use another image, you possibly also need to adjust +# ansible_python_interpreter in test_connection.inventory. +source ../setup_docker/vars/main.env +IMAGE="${DOCKER_TEST_IMAGE_PYTHON3}" + +# Setup phase + +echo "Setup" +ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml + +# If docker wasn't installed, don't run the tests +if [ "$(command -v docker)" == "" ]; then + exit +fi + + +# Test phase + +CONTAINER_SUFFIX=-${RANDOM} + +DOCKER_CONTAINERS="docker-connection-test-container${CONTAINER_SUFFIX}" + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +cleanup() { + echo "Cleanup" + docker rm -f ${DOCKER_CONTAINERS} + echo "Shutdown" + ANSIBLE_ROLES_PATH=.. 
ansible-playbook shutdown.yml + echo "Done" + exit 0 +} + +trap cleanup INT TERM EXIT + +echo "Start containers" +for CONTAINER in ${DOCKER_CONTAINERS}; do + if [ "${ANSIBLE_TEST_COVERAGE:-}" == "" ]; then + docker run --rm --name ${CONTAINER} --detach "${IMAGE}" /bin/sh -c 'sleep 10m' + else + docker run --rm --name ${CONTAINER} --detach -v /tmp:/tmp "${IMAGE}" /bin/sh -c 'sleep 10m' + docker exec ${CONTAINER} pip3 install coverage + fi + echo ${CONTAINER} +done + +cat > test_connection.inventory << EOF +[docker_api] +docker_api-no-pipelining ansible_pipelining=false +docker_api-pipelining ansible_pipelining=true + +[docker_api:vars] +ansible_host=docker-connection-test-container${CONTAINER_SUFFIX} +ansible_connection=community.docker.docker_api +ansible_python_interpreter=/usr/local/bin/python3 +EOF + +echo "Run tests" +./runme-connection.sh "$@" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml new file mode 100644 index 00000000..a709d2c7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + vars: + docker_skip_cleanup: yes + + tasks: + - name: Setup docker + import_role: + name: setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml new file mode 100644 index 00000000..e01d1210 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml @@ -0,0 +1,15 @@ +--- +- hosts: localhost + connection: local + vars: + docker_skip_cleanup: yes + + tasks: + - name: Remove docker packages + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: + - docker + - docker-ce + - docker-ce-cli + state: absent diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases new file mode 100644 index 00000000..f5e09799 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases @@ -0,0 +1,2 @@ +needs/target/connection +hidden diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh new file mode 100755 index 00000000..d3976ff3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux + +# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir. +# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix. 
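+# For example, when this script is linked from 'connection_docker_api', the
+# Python one-liner below yields the group 'docker_api'. A rough shell-only
+# equivalent, shown here purely as an illustrative sketch (it is not used by
+# the tests), would be:
+#
+#   group="$(basename "$(cd "$(dirname "$0")" && pwd)")"
+#   group="${group#connection_}"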
+ +group=$(python -c \ + "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))") + +cd ../connection + +INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \ + -e target_hosts="${group}" \ + -e action_prefix= \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=/tmp/ansible-remote \ + "$@" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/aliases new file mode 100644 index 00000000..cdf1b9b1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml new file mode 100644 index 00000000..ea3fbde5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml @@ -0,0 +1,11 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_docker_config.yml + when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_config tests!" 
+ when: not(docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml new file mode 100644 index 00000000..8220e8f5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml @@ -0,0 +1,139 @@ +--- +- block: + - shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool" + + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool" + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + + - name: Parameter name should be required + docker_config: + state: present + ignore_errors: yes + register: output + + - name: assert failure when called with no name + assert: + that: + - 'output.failed' + - 'output.msg == "missing required arguments: name"' + + - name: Test parameters + docker_config: + name: foo + state: present + ignore_errors: yes + register: output + + - name: assert failure when called with no data + assert: + that: + - 'output.failed' + - 'output.msg == "state is present but all of the following are missing: data"' + + - name: Create config + docker_config: + name: db_password + data: opensesame! + state: present + register: output + + - name: Create variable config_id + set_fact: + config_id: "{{ output.config_id }}" + + - name: Inspect config + command: "docker config inspect {{ config_id }}" + register: inspect + ignore_errors: yes + + - debug: var=inspect + + - name: assert config creation succeeded + assert: + that: + - "'db_password' in inspect.stdout" + - "'ansible_key' in inspect.stdout" + when: inspect is not failed + - assert: + that: + - "'is too new. Maximum supported API version is' in inspect.stderr" + when: inspect is failed + + - name: Create config again + docker_config: + name: db_password + data: opensesame! + state: present + register: output + + - name: assert create config is idempotent + assert: + that: + - not output.changed + + - name: Create config again (base64) + docker_config: + name: db_password + data: b3BlbnNlc2FtZSE= + data_is_b64: true + state: present + register: output + + - name: assert create config (base64) is idempotent + assert: + that: + - not output.changed + + - name: Update config + docker_config: + name: db_password + data: newpassword! 
+ state: present + register: output + + - name: assert config was updated + assert: + that: + - output.changed + - output.config_id != config_id + + - name: Remove config + docker_config: + name: db_password + state: absent + + - name: Check that config is removed + command: "docker config inspect {{ config_id }}" + register: output + ignore_errors: yes + + - name: assert config was removed + assert: + that: + - output.failed + + - name: Remove config + docker_config: + name: db_password + state: absent + register: output + + - name: assert remove config is idempotent + assert: + that: + - not output.changed + + always: + - name: Remove a Swarm cluster + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/aliases new file mode 100644 index 00000000..6e8edef7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file new file mode 100644 index 00000000..b15f1b64 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file @@ -0,0 +1,2 @@ +TEST3=val3 +TEST4=val4 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py new file mode 100644 index 00000000..1b809aae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py @@ -0,0 +1,34 @@ +# (c) 2020, Felix Fontein <felix@fontein.de> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
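+
+# This plugin defines the normalize_ipaddr filter, which canonicalizes an IP
+# address to its compressed form so that differently spelled IPv6 addresses
+# compare equal. A hypothetical template invocation (shown only as an
+# illustration, not part of this plugin):
+#
+#   {{ 'fdb6:feea:0001:0000::0002' | normalize_ipaddr }}  ->  'fdb6:feea:1::2'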
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.community.crypto.plugins.module_utils.compat import ipaddress + + +def _normalize_ipaddr(ipaddr): + return ipaddress.ip_address(ipaddr).compressed + + +class FilterModule(object): + """ IP address and network manipulation filters """ + + def filters(self): + return { + 'normalize_ipaddr': _normalize_ipaddr, + } diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml new file mode 100644 index 00000000..338e0b1c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml @@ -0,0 +1,43 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Create random name prefix (for containers, networks, ...) +- name: Create random container name prefix + set_fact: + cname_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + cnames: [] + dnetworks: [] + +- debug: + msg: "Using container name prefix {{ cname_prefix }}" + +# Run the tests +- block: + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" + + always: + - name: "Make sure all containers are removed" + docker_container: + name: "{{ item }}" + state: absent + force_kill: yes + with_items: "{{ cnames }}" + diff: no + - name: "Make sure all networks are removed" + docker_network: + name: "{{ item }}" + state: absent + force: yes + with_items: "{{ dnetworks }}" + when: docker_py_version is version('1.10.0', '>=') + diff: no + + when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=') + +- fail: msg="Too old docker / docker-py version to run all docker_container tests!" 
+ when: not(docker_py_version is version('3.5.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml new file mode 100644 index 00000000..a2999370 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml @@ -0,0 +1,3 @@ +--- +- name: "Loading tasks from {{ item }}" + include_tasks: "{{ item }}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml new file mode 100644 index 00000000..fadf4dc3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml @@ -0,0 +1,463 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-comparisons' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +#################################################################### +## value ########################################################### +#################################################################### + +- name: value + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.com + register: value_1 + +- name: value (change, ignore) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.org + force_kill: yes + comparisons: + hostname: ignore + register: value_2 + +- name: value (change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.org + force_kill: yes + comparisons: + hostname: strict + register: value_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - value_1 is changed + - value_2 is not changed + - value_3 is changed + +#################################################################### +## list ############################################################ +#################################################################### + +- name: list + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 1.1.1.1 + - 8.8.8.8 + register: list_1 + +- name: list (change, ignore) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 9.9.9.9 + force_kill: yes + comparisons: + dns_servers: ignore + register: list_2 + +- name: list (change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 9.9.9.9 + force_kill: yes + comparisons: + dns_servers: strict + register: list_3 + +- name: cleanup + 
docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - list_1 is changed + - list_2 is not changed + - list_3 is changed + +#################################################################### +## set ############################################################# +#################################################################### + +- name: set + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1010" + - "1011" + register: set_1 + +- name: set (change, ignore) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1010" + - "1011" + - "1012" + force_kill: yes + comparisons: + groups: ignore + register: set_2 + +- name: set (change, allow_more_present) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1010" + - "1011" + - "1012" + force_kill: yes + comparisons: + groups: allow_more_present + register: set_3 + +- name: set (change, allow_more_present) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1010" + - "1012" + force_kill: yes + comparisons: + groups: allow_more_present + register: set_4 + +- name: set (change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1010" + - "1012" + force_kill: yes + comparisons: + groups: strict + register: set_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - set_1 is changed + - set_2 is not changed + - set_3 is changed + - set_4 is not changed + - set_5 is changed + +#################################################################### +## set(dict) ####################################################### +#################################################################### + +- name: set(dict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/urandom:/dev/virt-urandom:rwm" + register: set_dict_1 + +- name: set(dict) (change, ignore) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/urandom:/dev/virt-urandom:rwm" + - "/dev/null:/dev/virt-null:rwm" + force_kill: yes + comparisons: + devices: ignore + register: set_dict_2 + +- name: set(dict) (change, allow_more_present) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/urandom:/dev/virt-urandom:rwm" + - "/dev/null:/dev/virt-null:rwm" + force_kill: yes + comparisons: + devices: allow_more_present + register: set_dict_3 + +- name: set(dict) (change, allow_more_present) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/null:/dev/virt-null:rwm" + force_kill: yes + comparisons: + devices: 
allow_more_present + register: set_dict_4 + +- name: set(dict) (change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/null:/dev/virt-null:rwm" + force_kill: yes + comparisons: + devices: strict + register: set_dict_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - set_dict_1 is changed + - set_dict_2 is not changed + - set_dict_3 is changed + - set_dict_4 is not changed + - set_dict_5 is changed + +#################################################################### +## dict ############################################################ +#################################################################### + +- name: dict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.2: world + register: dict_1 + +- name: dict (change, ignore) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.2: world + ansible.test.3: ansible + force_kill: yes + comparisons: + labels: ignore + register: dict_2 + +- name: dict (change, allow_more_present) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.2: world + ansible.test.3: ansible + force_kill: yes + comparisons: + labels: allow_more_present + register: dict_3 + +- name: dict (change, allow_more_present) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.3: ansible + force_kill: yes + comparisons: + labels: allow_more_present + register: dict_4 + +- name: dict (change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.3: ansible + force_kill: yes + comparisons: + labels: strict + register: dict_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - dict_1 is changed + - dict_2 is not changed + - dict_3 is changed + - dict_4 is not changed + - dict_5 is changed + +#################################################################### +## wildcard ######################################################## +#################################################################### + +- name: Pull {{ docker_test_image_hello_world }} image to make sure wildcard_2 test succeeds + # If the image isn't there, it will pull it and return 'changed'. 
+ docker_image: + name: "{{ docker_test_image_hello_world }}" + source: pull + +- name: wildcard + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.com + stop_timeout: 1 + labels: + ansible.test.1: hello + ansible.test.2: world + ansible.test.3: ansible + register: wildcard_1 + +- name: wildcard (change, ignore) + docker_container: + image: "{{ docker_test_image_hello_world }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.org + stop_timeout: 2 + labels: + ansible.test.1: hello + ansible.test.4: ignore + force_kill: yes + comparisons: + '*': ignore + register: wildcard_2 + +- name: wildcard (change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.org + stop_timeout: 1 + labels: + ansible.test.1: hello + ansible.test.2: world + ansible.test.3: ansible + force_kill: yes + comparisons: + '*': strict + register: wildcard_3 + +- name: wildcard (no change, strict) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + hostname: example.org + stop_timeout: 1 + labels: + ansible.test.1: hello + ansible.test.2: world + ansible.test.3: ansible + force_kill: yes + comparisons: + '*': strict + register: wildcard_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - wildcard_1 is changed + - wildcard_2 is not changed + - wildcard_3 is changed + - wildcard_4 is not changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml new file mode 100644 index 00000000..de8758aa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml @@ -0,0 +1,118 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-hi' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +#################################################################### +## container_default_behavior: compatibility ####################### +#################################################################### + +- name: Start container (check) + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: compatibility + check_mode: yes + register: start_1 + +- name: Start container + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: compatibility + register: start_2 + +- name: Start container (idempotent) + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: compatibility + register: start_3 + +- name: Start container (idempotent check) + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + state: started + container_default_behavior: compatibility + check_mode: yes + register: 
start_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - start_1 is changed + - start_2 is changed + - start_3 is not changed + - start_4 is not changed + +#################################################################### +## container_default_behavior: no_defaults ######################### +#################################################################### + +- name: Start container (check) + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: no_defaults + check_mode: yes + register: start_1 + +- name: Start container + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: no_defaults + register: start_2 + +- name: Start container (idempotent) + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: no_defaults + register: start_3 + +- name: Start container (idempotent check) + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + container_default_behavior: no_defaults + check_mode: yes + register: start_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - start_1 is changed + - start_2 is changed + - start_3 is not changed + - start_4 is not changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml new file mode 100644 index 00000000..ff4a97a7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml @@ -0,0 +1,141 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-iid' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +- name: Pull images + docker_image: + name: "{{ image }}" + source: pull + loop: + - "{{ docker_test_image_hello_world }}" + - "{{ docker_test_image_alpine }}" + loop_control: + loop_var: image + +- name: Get image ID of {{ docker_test_image_hello_world }} and {{ docker_test_image_alpine }} images + docker_image_info: + name: + - "{{ docker_test_image_hello_world }}" + - "{{ docker_test_image_alpine }}" + register: image_info + +- assert: + that: + - image_info.images | length == 2 + +- name: Print image IDs + debug: + msg: "{{ docker_test_image_hello_world }}: {{ image_info.images[0].Id }}; {{ docker_test_image_alpine }}: {{ image_info.images[1].Id }}" + +- name: Create container with {{ docker_test_image_hello_world }} image via ID + docker_container: + image: "{{ image_info.images[0].Id }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: create_1 + +- name: Create container with {{ docker_test_image_hello_world }} image via ID (idempotent) + docker_container: + image: "{{ image_info.images[0].Id }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: create_2 + +- name: Create container with {{ docker_test_image_alpine }} image via ID + docker_container: + image: "{{ 
image_info.images[1].Id }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: create_3 + +- name: Create container with {{ docker_test_image_alpine }} image via ID (idempotent) + docker_container: + image: "{{ image_info.images[1].Id }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: create_4 + +- name: Untag image + # Image will not be deleted since the container still uses it + docker_image: + name: "{{ docker_test_image_alpine }}" + force_absent: yes + state: absent + +- name: Create container with {{ docker_test_image_alpine }} image via name (check mode, will pull, same image) + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: present + register: create_5 + check_mode: yes + +- name: Create container with {{ docker_test_image_alpine }} image via name (will pull, same image) + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: present + register: create_6 + +- name: Cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - create_1 is changed + - create_2 is not changed + - create_3 is changed + - create_4 is not changed + - create_5 is changed + - create_6 is changed + - create_6.container.Image == image_info.images[1].Id + - create_6.container.Id == create_4.container.Id # make sure container wasn't recreated + +- name: Create container with {{ docker_test_image_digest_base }} image via old digest + docker_container: + image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: digest_1 + +- name: Create container with {{ docker_test_image_digest_base }} image via old digest (idempotent) + docker_container: + image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: digest_2 + +- name: Update container with {{ docker_test_image_digest_base }} image via new digest + docker_container: + image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v2 }}" + name: "{{ cname }}" + state: present + force_kill: yes + register: digest_3 + +- name: Cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - digest_1 is changed + - digest_2 is not changed + - digest_3 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml new file mode 100644 index 00000000..dbb3967f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml @@ -0,0 +1,445 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-mounts' }}" + cname_h1: "{{ cname_prefix ~ '-mounts-h1' }}" + cname_h2: "{{ cname_prefix ~ '-mounts-h2' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname, cname_h1, cname_h2] }}" + +#################################################################### +## keep_volumes #################################################### +#################################################################### + +# TODO: - keep_volumes + 
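+# A rough sketch of what the missing keep_volumes test could look like,
+# assuming an anonymous volume is attached to the container and that
+# keep_volumes decides whether it survives 'state: absent' (this is only a
+# commented-out sketch, not a finished test):
+#
+# - name: keep_volumes (create container with an anonymous volume)
+#   docker_container:
+#     image: "{{ docker_test_image_alpine }}"
+#     command: '/bin/sh -c "sleep 10m"'
+#     name: "{{ cname }}"
+#     state: started
+#     volumes:
+#       - /data
+#
+# - name: keep_volumes (remove container but keep its anonymous volume)
+#   docker_container:
+#     name: "{{ cname }}"
+#     state: absent
+#     keep_volumes: yes
+#     force_kill: yes
+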
+#################################################################### +## mounts ########################################################## +#################################################################### + +- name: mounts + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /tmp + target: /tmp + type: bind + - source: / + target: /whatever + type: bind + read_only: no + register: mounts_1 + ignore_errors: yes + +- name: mounts (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: / + target: /whatever + type: bind + read_only: no + - source: /tmp + target: /tmp + type: bind + register: mounts_2 + ignore_errors: yes + +- name: mounts (less mounts) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /tmp + target: /tmp + type: bind + register: mounts_3 + ignore_errors: yes + +- name: mounts (more mounts) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /tmp + target: /tmp + type: bind + - source: /tmp + target: /somewhereelse + type: bind + read_only: yes + force_kill: yes + register: mounts_4 + ignore_errors: yes + +- name: mounts (different modes) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /tmp + target: /tmp + type: bind + - source: /tmp + target: /somewhereelse + type: bind + read_only: no + force_kill: yes + register: mounts_5 + ignore_errors: yes + +- name: mounts (endpoint collision) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /home + target: /x + type: bind + - source: /etc + target: /x + type: bind + read_only: no + force_kill: yes + register: mounts_6 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - mounts_1 is changed + - mounts_2 is not changed + - mounts_3 is not changed + - mounts_4 is changed + - mounts_5 is changed + - mounts_6 is failed + - "'The mount point \"/x\" appears twice in the mounts option' == mounts_6.msg" + when: docker_py_version is version('2.6.0', '>=') +- assert: + that: + - mounts_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg" + - "'Minimum version required is 2.6.0 ' in mounts_1.msg" + when: docker_py_version is version('2.6.0', '<') + +#################################################################### +## mounts + volumes ################################################ +#################################################################### + +- name: mounts + volumes + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: / + target: /whatever + type: bind + read_only: yes + volumes: + - /tmp:/tmp + register: mounts_volumes_1 + ignore_errors: yes + +- name: mounts + volumes (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: / + target: 
/whatever + type: bind + read_only: yes + volumes: + - /tmp:/tmp + register: mounts_volumes_2 + ignore_errors: yes + +- name: mounts + volumes (switching) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /tmp + target: /tmp + type: bind + read_only: no + volumes: + - /:/whatever:ro + force_kill: yes + register: mounts_volumes_3 + ignore_errors: yes + +- name: mounts + volumes (collision, should fail) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + mounts: + - source: /tmp + target: /tmp + type: bind + read_only: no + volumes: + - /tmp:/tmp + force_kill: yes + register: mounts_volumes_4 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - mounts_volumes_1 is changed + - mounts_volumes_2 is not changed + - mounts_volumes_3 is changed + - mounts_volumes_4 is failed + - "'The mount point \"/tmp\" appears both in the volumes and mounts option' in mounts_volumes_4.msg" + when: docker_py_version is version('2.6.0', '>=') +- assert: + that: + - mounts_volumes_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in mounts_volumes_1.msg" + - "'Minimum version required is 2.6.0 ' in mounts_volumes_1.msg" + when: docker_py_version is version('2.6.0', '<') + +#################################################################### +## volume_driver ################################################### +#################################################################### + +- name: volume_driver + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + volume_driver: local + state: started + register: volume_driver_1 + +- name: volume_driver (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + volume_driver: local + state: started + register: volume_driver_2 + +- name: volume_driver (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + volume_driver: / + state: started + force_kill: yes + register: volume_driver_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - volume_driver_1 is changed + - volume_driver_2 is not changed + - volume_driver_3 is changed + +#################################################################### +## volumes ######################################################### +#################################################################### + +- name: volumes + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes: + - "/tmp:/tmp" + - "/:/whatever:rw,z" + - "/anon:rw" + register: volumes_1 + +- name: volumes (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes: + - "/:/whatever:rw,z" + - "/tmp:/tmp" + - "/anon:rw" + register: volumes_2 + +- name: volumes (less volumes) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes: + - "/tmp:/tmp" + register: volumes_3 + +- name: volumes (more volumes) + docker_container: +
image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes: + - "/tmp:/tmp" + - "/tmp:/somewhereelse:ro,Z" + force_kill: yes + register: volumes_4 + +- name: volumes (different modes) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes: + - "/tmp:/tmp" + - "/tmp:/somewhereelse:ro" + force_kill: yes + register: volumes_5 + +- name: volumes (collision) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes: + - "/etc:/tmp" + - "/home:/tmp:ro" + force_kill: yes + register: volumes_6 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - volumes_1 is changed + - volumes_1.container.Config.Volumes | length == 1 + - volumes_1.container.Config.Volumes['/anon:rw'] | length == 0 + - volumes_2 is not changed + - volumes_3 is not changed + - volumes_4 is changed + - not volumes_4.container.Config.Volumes + - volumes_5 is changed + - volumes_6 is failed + - "'The mount point \"/tmp\" appears twice in the volumes option' in volumes_6.msg" + +#################################################################### +## volumes_from #################################################### +#################################################################### + +- name: start helpers + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ container_name }}" + state: started + volumes: + - "{{ '/tmp:/tmp' if container_name == cname_h1 else '/:/whatever:ro' }}" + loop: + - "{{ cname_h1 }}" + - "{{ cname_h2 }}" + loop_control: + loop_var: container_name + +- name: volumes_from + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes_from: "{{ cname_h1 }}" + register: volumes_from_1 + +- name: volumes_from (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes_from: "{{ cname_h1 }}" + register: volumes_from_2 + +- name: volumes_from (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + volumes_from: "{{ cname_h2 }}" + force_kill: yes + register: volumes_from_3 + +- name: cleanup + docker_container: + name: "{{ container_name }}" + state: absent + force_kill: yes + loop: + - "{{ cname }}" + - "{{ cname_h1 }}" + - "{{ cname_h2 }}" + loop_control: + loop_var: container_name + diff: no + +- assert: + that: + - volumes_from_1 is changed + - volumes_from_2 is not changed + - volumes_from_3 is changed + +#################################################################### +#################################################################### +#################################################################### diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml new file mode 100644 index 00000000..9ef33643 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml @@ -0,0 +1,747 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-network' }}" + cname_h1: "{{ cname_prefix ~ '-network-h1' }}" + nname_1: "{{ cname_prefix ~ '-network-1' }}" + nname_2: "{{ cname_prefix ~ '-network-2' }}" + nname_3: "{{ cname_prefix ~ '-network-3' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname, cname_h1] }}" + dnetworks: "{{ dnetworks + [nname_1, nname_2, nname_3] }}" + +- name: Create networks + docker_network: + name: "{{ network_name }}" + state: present + loop: + - "{{ nname_1 }}" + - "{{ nname_2 }}" + loop_control: + loop_var: network_name + when: docker_py_version is version('1.10.0', '>=') + +- set_fact: + subnet_ipv4_base: 10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }} + subnet_ipv6_base: fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }} + # If netaddr would be installed on the controller, one could do: + # subnet_ipv4: "10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }}.0/24" + # subnet_ipv6: "fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }}::/64" + +- set_fact: + subnet_ipv4: "{{ subnet_ipv4_base }}.0/24" + subnet_ipv6: "{{ subnet_ipv6_base }}::/64" + nname_3_ipv4_2: "{{ subnet_ipv4_base }}.2" + nname_3_ipv4_3: "{{ subnet_ipv4_base }}.3" + nname_3_ipv4_4: "{{ subnet_ipv4_base }}.4" + nname_3_ipv6_2: "{{ subnet_ipv6_base }}::2" + nname_3_ipv6_3: "{{ subnet_ipv6_base }}::3" + nname_3_ipv6_4: "{{ subnet_ipv6_base }}::4" + # If netaddr would be installed on the controller, one could do: + # nname_3_ipv4_2: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(2) }}" + # nname_3_ipv4_3: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(3) }}" + # nname_3_ipv4_4: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(4) }}" + # nname_3_ipv6_2: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(2) }}" + # nname_3_ipv6_3: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(3) }}" + # nname_3_ipv6_4: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(4) }}" + +- debug: + msg: "Chose random IPv4 subnet {{ subnet_ipv4 }} and random IPv6 subnet {{ subnet_ipv6 }}" + +- name: Create network with fixed IPv4 and IPv6 subnets + docker_network: + name: "{{ nname_3 }}" + enable_ipv6: yes + ipam_config: + - subnet: "{{ subnet_ipv4 }}" + - subnet: "{{ subnet_ipv6 }}" + state: present + when: docker_py_version is version('1.10.0', '>=') + +#################################################################### +## network_mode #################################################### +#################################################################### + +- name: network_mode + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + network_mode: host + register: network_mode_1 + +- name: network_mode (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + network_mode: host + register: network_mode_2 + +- name: network_mode (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + network_mode: none + force_kill: yes + register: network_mode_3 + +- name: network_mode (container mode setup) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c 
"sleep 10m"' + name: "{{ cname_h1 }}" + state: started + register: cname_h1_id + +- name: network_mode (container mode) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + network_mode: "container:{{ cname_h1_id.container.Id }}" + force_kill: yes + register: network_mode_4 + +- name: network_mode (container mode idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + network_mode: "container:{{ cname_h1 }}" + register: network_mode_5 + +- name: cleanup + docker_container: + name: "{{ container_name }}" + state: absent + force_kill: yes + loop: + - "{{ cname }}" + - "{{ cname_h1 }}" + loop_control: + loop_var: container_name + diff: no + +- assert: + that: + - network_mode_1 is changed + - network_mode_1.container.HostConfig.NetworkMode == 'host' + - network_mode_2 is not changed + - network_mode_2.container.HostConfig.NetworkMode == 'host' + - network_mode_3 is changed + - network_mode_3.container.HostConfig.NetworkMode == 'none' + - network_mode_4 is changed + - network_mode_4.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id + - network_mode_5 is not changed + - network_mode_5.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id + +#################################################################### +## networks, purge_networks for networks_cli_compatible=no ######### +#################################################################### + +- block: + - name: networks_cli_compatible=no, networks w/o purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: no + register: networks_1 + + - name: networks_cli_compatible=no, networks w/o purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: no + register: networks_2 + + - name: networks_cli_compatible=no, networks, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + - name: "{{ nname_1 }}" + networks_cli_compatible: no + force_kill: yes + register: networks_3 + + - name: networks_cli_compatible=no, networks, purge_networks (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: "{{ nname_1 }}" + - name: bridge + networks_cli_compatible: no + register: networks_4 + + - name: networks_cli_compatible=no, networks (less networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: bridge + networks_cli_compatible: no + register: networks_5 + + - name: networks_cli_compatible=no, networks, purge_networks (less networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + networks_cli_compatible: no + force_kill: yes + 
register: networks_6 + + - name: networks_cli_compatible=no, networks, purge_networks (more networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + - name: "{{ nname_2 }}" + networks_cli_compatible: no + force_kill: yes + register: networks_7 + + - name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + + - assert: + that: + # networks_1 has networks default/'bridge', nname_1, nname_2 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 3 + - nname_1 in networks_1.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" + # networks_2 has networks default/'bridge', nname_1, nname_2 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 3 + - nname_1 in networks_2.container.NetworkSettings.Networks + - nname_2 in networks_2.container.NetworkSettings.Networks + - "'default' in networks_2.container.NetworkSettings.Networks or 'bridge' in networks_2.container.NetworkSettings.Networks" + # networks_3 has networks 'bridge', nname_1 + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_3.container.NetworkSettings.Networks + - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" + # networks_4 has networks 'bridge', nname_1 + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_4.container.NetworkSettings.Networks + - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" + # networks_5 has networks 'bridge', nname_1 + - networks_5 is not changed + - networks_5.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_5.container.NetworkSettings.Networks + - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks" + # networks_6 has networks 'bridge' + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" + # networks_7 has networks 'bridge', nname_2 + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks | length == 2 + - nname_2 in networks_7.container.NetworkSettings.Networks + - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" + + when: docker_py_version is version('1.10.0', '>=') + +#################################################################### +## networks for networks_cli_compatible=yes ######################## +#################################################################### + +- block: + - name: networks_cli_compatible=yes, networks specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + aliases: + - alias1 + - alias2 + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + register: networks_1 + + - name: networks_cli_compatible=yes, networks specified +
docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + register: networks_2 + + - name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + + - name: networks_cli_compatible=yes, empty networks list specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + register: networks_3 + + - name: networks_cli_compatible=yes, empty networks list specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + register: networks_4 + + - name: networks_cli_compatible=yes, empty networks list specified, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + purge_networks: yes + force_kill: yes + register: networks_5 + + - name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + + - name: networks_cli_compatible=yes, networks not specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + force_kill: yes + register: networks_6 + + - name: networks_cli_compatible=yes, networks not specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + register: networks_7 + + - name: networks_cli_compatible=yes, networks not specified, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + purge_networks: yes + force_kill: yes + register: networks_8 + + - name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + + - debug: var=networks_3 + + - assert: + that: + # networks_1 has networks nname_1, nname_2 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_1.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + # networks_2 has networks nname_1, nname_2 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_2.container.NetworkSettings.Networks + - nname_2 in networks_2.container.NetworkSettings.Networks + # networks_3 has networks 'bridge' + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" + # networks_4 has networks 'bridge' + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" + # networks_5 has no networks + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks | length == 0 + # networks_6 has networks 'bridge' + - networks_6 is
changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" + # networks_7 has networks 'bridge' + - networks_7 is not changed + - networks_7.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" + # networks_8 has no networks + - networks_8 is changed + - networks_8.container.NetworkSettings.Networks | length == 0 + + when: docker_py_version is version('1.10.0', '>=') + +#################################################################### +## networks with comparisons ####################################### +#################################################################### + +- block: + - name: create container with one network + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + networks_cli_compatible: yes + register: networks_1 + + - name: different networks, comparisons=ignore + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + networks: ignore + register: networks_2 + + - name: less networks, comparisons=ignore + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: ignore + register: networks_3 + + - name: less networks, comparisons=allow_more_present + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: allow_more_present + register: networks_4 + + - name: different networks, comparisons=allow_more_present + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + networks: allow_more_present + force_kill: yes + register: networks_5 + + - name: different networks, comparisons=strict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + networks: strict + force_kill: yes + register: networks_6 + + - name: less networks, comparisons=strict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: strict + force_kill: yes + register: networks_7 + + - name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + + - assert: + that: + # networks_1 has networks nname_1 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_1.container.NetworkSettings.Networks + # networks_2 has networks nname_1 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_2.container.NetworkSettings.Networks + # networks_3 
has networks nname_1 + - networks_3 is not changed + - networks_3.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_3.container.NetworkSettings.Networks + # networks_4 has networks nname_1 + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_4.container.NetworkSettings.Networks + # networks_5 has networks nname_1, nname_2 + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_5.container.NetworkSettings.Networks + - nname_2 in networks_5.container.NetworkSettings.Networks + # networks_6 has networks nname_2 + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - nname_2 in networks_6.container.NetworkSettings.Networks + # networks_7 has no networks + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks | length == 0 + + when: docker_py_version is version('1.10.0', '>=') + +#################################################################### +## networks with IP address ######################################## +#################################################################### + +- block: + - name: create container (stopped) with one network and fixed IP + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_2 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_1 + + - name: create container (stopped) with one network and fixed IP (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_2 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_2 + + - name: create container (stopped) with one network and fixed IP (different IPv4) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_3 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_3 + + - name: create container (stopped) with one network and fixed IP (different IPv6) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_3 }}" + ipv6_address: "{{ nname_3_ipv6_3 }}" + networks_cli_compatible: yes + register: networks_4 + + - name: create container (started) with one network and fixed IP + docker_container: + name: "{{ cname }}" + state: started + register: networks_5 + + - name: create container (started) with one network and fixed IP (different IPv4) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_3 }}" + networks_cli_compatible: yes + force_kill: yes + register: networks_6 + + - name: create container (started) with one network and fixed IP (different IPv6) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + 
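# (The requested fixed addresses are stored under IPAMConfig; Docker only + # fills in the live IPAddress/GlobalIPv6Address fields once the container + # is running, which the assertions at the end of this block rely on.) +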
networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_4 }}" + networks_cli_compatible: yes + force_kill: yes + register: networks_7 + + - name: create container (started) with one network and fixed IP (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_4 }}" + networks_cli_compatible: yes + register: networks_8 + + - name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + + - assert: + that: + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 + - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 + - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_4 is changed + - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3 + - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - 
networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_8 is not changed + - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + + when: docker_py_version is version('1.10.0', '>=') + +#################################################################### +#################################################################### +#################################################################### + +- name: Delete networks + docker_network: + name: "{{ network_name }}" + state: absent + force: yes + loop: + - "{{ nname_1 }}" + - "{{ nname_2 }}" + - "{{ nname_3 }}" + loop_control: + loop_var: network_name + when: docker_py_version is version('1.10.0', '>=') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml new file mode 100644 index 00000000..0e169fbb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml @@ -0,0 +1,3905 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-options' }}" + cname_h1: "{{ cname_prefix ~ '-options-h1' }}" + cname_h2: "{{ cname_prefix ~ '-options-h2' }}" + cname_h3: "{{ cname_prefix ~ '-options-h3' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname, cname_h1, cname_h2, cname_h3] }}" + +#################################################################### +## auto_remove ##################################################### +#################################################################### + +- name: auto_remove + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "echo"' + name: "{{ cname }}" + state: started + auto_remove: yes + register: auto_remove_1 + ignore_errors: yes + +- name: Give container 1 second to be sure it terminated + pause: + seconds: 1 + +- name: auto_remove (verify) + docker_container: + name: "{{ cname }}" + state: absent + register: auto_remove_2 + ignore_errors: yes + +- assert: + that: + - auto_remove_1 is changed + - auto_remove_2 is not changed + when: docker_py_version is version('2.1.0', '>=') +- assert: + that: + - auto_remove_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in auto_remove_1.msg" + - "'Minimum version required is 2.1.0 ' in auto_remove_1.msg" + when: docker_py_version is version('2.1.0', '<') + +#################################################################### +## blkio_weight #################################################### +#################################################################### + +- name: blkio_weight + docker_container: + image: "{{ docker_test_image_alpine }}" + command: 
'/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 123 + register: blkio_weight_1 + +- name: blkio_weight (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 123 + register: blkio_weight_2 + +- name: blkio_weight (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 234 + force_kill: yes + register: blkio_weight_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - blkio_weight_1 is changed + - "blkio_weight_2 is not changed or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in blkio_weight_2.warnings" + - blkio_weight_3 is changed + +#################################################################### +## cap_drop, capabilities ########################################## +#################################################################### + +- name: capabilities, cap_drop + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + capabilities: + - sys_time + cap_drop: + - all + register: capabilities_1 + +- name: capabilities, cap_drop (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + capabilities: + - sys_time + cap_drop: + - all + register: capabilities_2 + +- name: capabilities, cap_drop (less) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + capabilities: [] + cap_drop: + - all + register: capabilities_3 + +- name: capabilities, cap_drop (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + capabilities: + - setgid + cap_drop: + - all + force_kill: yes + register: capabilities_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - capabilities_1 is changed + - capabilities_2 is not changed + - capabilities_3 is not changed + - capabilities_4 is changed + +#################################################################### +## cgroup_parent ################################################### +#################################################################### + +- name: cgroup_parent + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + cgroup_parent: '' + register: cgroup_parent_1 + +- name: cgroup_parent (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + cgroup_parent: '' + register: cgroup_parent_2 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cgroup_parent_1 is changed + - cgroup_parent_2 is not changed + +#################################################################### +## command ######################################################### +#################################################################### + +- name: command + docker_container: + image: "{{ docker_test_image_alpine 
}}" + command: '/bin/sh -v -c "sleep 10m"' + name: "{{ cname }}" + state: started + register: command_1 + +- name: command (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -v -c "sleep 10m"' + name: "{{ cname }}" + state: started + register: command_2 + +- name: command (less parameters) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + force_kill: yes + register: command_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - command_1 is changed + - command_2 is not changed + - command_3 is changed + +#################################################################### +## cpu_period ###################################################### +#################################################################### + +- name: cpu_period + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_period: 90000 + state: started + register: cpu_period_1 + +- name: cpu_period (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_period: 90000 + state: started + register: cpu_period_2 + +- name: cpu_period (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_period: 50000 + state: started + force_kill: yes + register: cpu_period_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cpu_period_1 is changed + - cpu_period_2 is not changed + - cpu_period_3 is changed + +#################################################################### +## cpu_quota ####################################################### +#################################################################### + +- name: cpu_quota + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_quota: 150000 + state: started + register: cpu_quota_1 + +- name: cpu_quota (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_quota: 150000 + state: started + register: cpu_quota_2 + +- name: cpu_quota (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_quota: 50000 + state: started + force_kill: yes + register: cpu_quota_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cpu_quota_1 is changed + - cpu_quota_2 is not changed + - cpu_quota_3 is changed + +#################################################################### +## cpu_shares ###################################################### +#################################################################### + +- name: cpu_shares + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_shares: 900 + state: started + register: cpu_shares_1 + +- name: cpu_shares (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_shares: 900 + state: started + register: cpu_shares_2 + +- name: cpu_shares (change) + docker_container: + 
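# cpu_shares is a relative scheduler weight (the Docker default is 1024), + # not an absolute CPU count, so values like 900 and 1100 simply shift the + # container's share under contention. +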
image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpu_shares: 1100 + state: started + force_kill: yes + register: cpu_shares_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cpu_shares_1 is changed + - cpu_shares_2 is not changed + - cpu_shares_3 is changed + +#################################################################### +## cpuset_cpus ##################################################### +#################################################################### + +- name: cpuset_cpus + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpuset_cpus: "0" + state: started + register: cpuset_cpus_1 + +- name: cpuset_cpus (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpuset_cpus: "0" + state: started + register: cpuset_cpus_2 + +- name: cpuset_cpus (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpuset_cpus: "1" + state: started + force_kill: yes + # This will fail if the system the test is run on doesn't have + # multiple CPUs/cores available. + ignore_errors: yes + register: cpuset_cpus_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cpuset_cpus_1 is changed + - cpuset_cpus_2 is not changed + - cpuset_cpus_3 is failed or cpuset_cpus_3 is changed + +#################################################################### +## cpuset_mems ##################################################### +#################################################################### + +- name: cpuset_mems + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpuset_mems: "0" + state: started + register: cpuset_mems_1 + +- name: cpuset_mems (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpuset_mems: "0" + state: started + register: cpuset_mems_2 + +- name: cpuset_mems (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpuset_mems: "1" + state: started + force_kill: yes + # This will fail if the system the test is run on doesn't have + # multiple MEMs available. 
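+ # ("MEMs" are NUMA memory nodes: cpuset_mems pins the container's memory + # to the given nodes, so a single-node host is expected to reject "1".)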
+ ignore_errors: yes + register: cpuset_mems_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cpuset_mems_1 is changed + - cpuset_mems_2 is not changed + - cpuset_mems_3 is failed or cpuset_mems_3 is changed + +#################################################################### +## cpus ############################################################ +#################################################################### + +- name: cpus + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpus: 1 + state: started + ignore_errors: yes + register: cpus_1 + +- name: cpus (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpus: 1 + state: started + ignore_errors: yes + register: cpus_2 + +- name: cpus (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + cpus: 1.5 + state: started + force_kill: yes + # This will fail if the system the test is run on doesn't have + # more than one CPU available. + ignore_errors: yes + register: cpus_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - cpus_1 is changed + - cpus_2 is not changed and cpus_2 is not failed + - cpus_3 is failed or cpus_3 is changed + when: docker_py_version is version('2.3.0', '>=') +- assert: + that: + - cpus_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in cpus_1.msg" + - "'Minimum version required is 2.3.0 ' in cpus_1.msg" + when: docker_py_version is version('2.3.0', '<') + +#################################################################### +## debug ########################################################### +#################################################################### + +- name: debug (create) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + debug: yes + register: debug_1 + +- name: debug (start) + docker_container: + name: "{{ cname }}" + state: started + debug: yes + register: debug_2 + +- name: debug (stop) + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: stopped + force_kill: yes + debug: yes + register: debug_3 + +- name: debug (absent) + docker_container: + name: "{{ cname }}" + state: absent + debug: yes + force_kill: yes + register: debug_4 + +- assert: + that: + - debug_1 is changed + - debug_2 is changed + - debug_3 is changed + - debug_4 is changed + +#################################################################### +## detach, cleanup ################################################# +#################################################################### + +- name: detach without cleanup + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_hello_world }}" + detach: no + register: detach_no_cleanup + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + register: detach_no_cleanup_cleanup + diff: no + +- name: detach with cleanup + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_hello_world }}" + detach: no + cleanup: yes + register: detach_cleanup + +- name: cleanup (unnecessary) + docker_container: + name: "{{ cname }}" + state: absent + register: detach_cleanup_cleanup + diff: no + +- name: detach with auto_remove
and cleanup + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_hello_world }}" + detach: no + auto_remove: yes + cleanup: yes + register: detach_auto_remove + ignore_errors: yes + +- name: cleanup (unnecessary) + docker_container: + name: "{{ cname }}" + state: absent + register: detach_auto_remove_cleanup + diff: no + +- assert: + that: + # NOTE that 'Output' sometimes fails to contain the correct output + # of hello-world. We don't know why this happens, but it happens + # often enough to be annoying. That's why we disable this for now, + # and simply test that 'Output' is contained in the result. + - "'Output' in detach_no_cleanup.container" + - detach_no_cleanup.status == 0 + # - "'Hello from Docker!' in detach_no_cleanup.container.Output" + - detach_no_cleanup_cleanup is changed + - "'Output' in detach_cleanup.container" + - detach_cleanup.status == 0 + # - "'Hello from Docker!' in detach_cleanup.container.Output" + - detach_cleanup_cleanup is not changed +- assert: + that: + - "'Cannot retrieve result as auto_remove is enabled' == detach_auto_remove.container.Output" + - detach_auto_remove_cleanup is not changed + when: docker_py_version is version('2.1.0', '>=') + +#################################################################### +## devices ######################################################### +#################################################################### + +- name: devices + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/urandom:/dev/virt-urandom:rwm" + register: devices_1 + +- name: devices (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/urandom:/dev/virt-urandom:rwm" + - "/dev/random:/dev/virt-random:rwm" + register: devices_2 + +- name: devices (less) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + register: devices_3 + +- name: devices (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + devices: + - "/dev/random:/dev/virt-random:rwm" + - "/dev/null:/dev/virt-null:rwm" + force_kill: yes + register: devices_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - devices_1 is changed + - devices_2 is not changed + - devices_3 is not changed + - devices_4 is changed + +#################################################################### +## device_read_bps ################################################# +#################################################################### + +- name: device_read_bps + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_bps: + - path: /dev/random + rate: 20M + - path: /dev/urandom + rate: 10K + register: device_read_bps_1 + ignore_errors: yes + +- name: device_read_bps (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_bps: + - path: /dev/urandom + rate: 10K + - path: /dev/random + rate: 20M + register: device_read_bps_2 + 
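# (Same entries as the previous task, just reordered and with the same + # human-readable rates; the module is expected to compare the list as + # unordered, so this should report no change.) +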
ignore_errors: yes + +- name: device_read_bps (lesser entries) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_bps: + - path: /dev/random + rate: 20M + register: device_read_bps_3 + ignore_errors: yes + +- name: device_read_bps (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_bps: + - path: /dev/random + rate: 10M + - path: /dev/urandom + rate: 5K + force_kill: yes + register: device_read_bps_4 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - device_read_bps_1 is changed + - device_read_bps_2 is not changed + - device_read_bps_3 is not changed + - device_read_bps_4 is changed + when: docker_py_version is version('1.9.0', '>=') +- assert: + that: + - device_read_bps_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in device_read_bps_1.msg" + - "'Minimum version required is 1.9.0 ' in device_read_bps_1.msg" + when: docker_py_version is version('1.9.0', '<') + +#################################################################### +## device_read_iops ################################################ +#################################################################### + +- name: device_read_iops + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_iops: + - path: /dev/random + rate: 10 + - path: /dev/urandom + rate: 20 + register: device_read_iops_1 + ignore_errors: yes + +- name: device_read_iops (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_iops: + - path: /dev/urandom + rate: "20" + - path: /dev/random + rate: 10 + register: device_read_iops_2 + ignore_errors: yes + +- name: device_read_iops (less) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_iops: + - path: /dev/random + rate: 10 + register: device_read_iops_3 + ignore_errors: yes + +- name: device_read_iops (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_read_iops: + - path: /dev/random + rate: 30 + - path: /dev/urandom + rate: 50 + force_kill: yes + register: device_read_iops_4 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - device_read_iops_1 is changed + - device_read_iops_2 is not changed + - device_read_iops_3 is not changed + - device_read_iops_4 is changed + when: docker_py_version is version('1.9.0', '>=') +- assert: + that: + - device_read_iops_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in device_read_iops_1.msg" + - "'Minimum version required is 1.9.0 ' in device_read_iops_1.msg" + when: docker_py_version is version('1.9.0', '<') + +#################################################################### +## device_write_bps and device_write_iops ########################## +#################################################################### + +- name: device_write_bps and device_write_iops + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh 
-c "sleep 10m"' + name: "{{ cname }}" + state: started + device_write_bps: + - path: /dev/random + rate: 10M + device_write_iops: + - path: /dev/urandom + rate: 30 + register: device_write_limit_1 + ignore_errors: yes + +- name: device_write_bps and device_write_iops (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_write_bps: + - path: /dev/random + rate: 10M + device_write_iops: + - path: /dev/urandom + rate: 30 + register: device_write_limit_2 + ignore_errors: yes + +- name: device_write_bps and device_write_iops (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_write_bps: + - path: /dev/random + rate: 20K + device_write_iops: + - path: /dev/urandom + rate: 100 + force_kill: yes + register: device_write_limit_3 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - device_write_limit_1 is changed + - device_write_limit_2 is not changed + - device_write_limit_3 is changed + when: docker_py_version is version('1.9.0', '>=') +- assert: + that: + - device_write_limit_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in device_write_limit_1.msg" + - "'Minimum version required is 1.9.0 ' in device_write_limit_1.msg" + when: docker_py_version is version('1.9.0', '<') + +#################################################################### +## device_requests ################################################# +#################################################################### + +- name: device_requests + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_requests: [] + register: device_requests_1 + ignore_errors: yes + +- name: device_requests (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + device_requests: [] + register: device_requests_2 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - device_requests_1 is changed + - device_requests_2 is not changed + when: docker_py_version is version('4.3.0', '>=') and docker_api_version is version('1.40', '>=') +- assert: + that: + - device_requests_1 is failed + - | + (('version is ' ~ docker_py_version ~ ' ') in device_requests_1.msg and 'Minimum version required is 4.3.0 ' in device_requests_1.msg) or + (('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg) + when: docker_py_version is version('4.3.0', '<') or docker_api_version is version('1.40', '<') + +#################################################################### +## dns_opts ######################################################## +#################################################################### + +- name: dns_opts + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_opts: + - "timeout:10" + - rotate + register: dns_opts_1 + ignore_errors: yes + +- name: dns_opts (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state:
started + dns_opts: + - rotate + - "timeout:10" + register: dns_opts_2 + ignore_errors: yes + +- name: dns_opts (less resolv.conf options) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_opts: + - "timeout:10" + register: dns_opts_3 + ignore_errors: yes + +- name: dns_opts (more resolv.conf options) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_opts: + - "timeout:10" + - no-check-names + force_kill: yes + register: dns_opts_4 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - dns_opts_1 is changed + - dns_opts_2 is not changed + - dns_opts_3 is not changed + - dns_opts_4 is changed + when: docker_py_version is version('1.10.0', '>=') +- assert: + that: + - dns_opts_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in dns_opts_1.msg" + - "'Minimum version required is 1.10.0 ' in dns_opts_1.msg" + when: docker_py_version is version('1.10.0', '<') + +#################################################################### +## dns_search_domains ############################################## +#################################################################### + +- name: dns_search_domains + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_search_domains: + - example.com + - example.org + register: dns_search_domains_1 + +- name: dns_search_domains (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_search_domains: + - example.com + - example.org + register: dns_search_domains_2 + +- name: dns_search_domains (different order) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_search_domains: + - example.org + - example.com + force_kill: yes + register: dns_search_domains_3 + +- name: dns_search_domains (changed elements) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_search_domains: + - ansible.com + - example.com + force_kill: yes + register: dns_search_domains_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - dns_search_domains_1 is changed + - dns_search_domains_2 is not changed + - dns_search_domains_3 is changed + - dns_search_domains_4 is changed + +#################################################################### +## dns_servers ##################################################### +#################################################################### + +- name: dns_servers + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 1.1.1.1 + - 8.8.8.8 + register: dns_servers_1 + +- name: dns_servers (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 1.1.1.1 + - 8.8.8.8 + register: dns_servers_2 + +- name: dns_servers (changed order) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c 
"sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 8.8.8.8 + - 1.1.1.1 + force_kill: yes + register: dns_servers_3 + +- name: dns_servers (changed elements) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + dns_servers: + - 8.8.8.8 + - 9.9.9.9 + force_kill: yes + register: dns_servers_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - dns_servers_1 is changed + - dns_servers_2 is not changed + - dns_servers_3 is changed + - dns_servers_4 is changed + +#################################################################### +## domainname ###################################################### +#################################################################### + +- name: domainname + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + domainname: example.com + state: started + register: domainname_1 + +- name: domainname (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + domainname: example.com + state: started + register: domainname_2 + +- name: domainname (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + domainname: example.org + state: started + force_kill: yes + register: domainname_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - domainname_1 is changed + - domainname_2 is not changed + - domainname_3 is changed + +#################################################################### +## entrypoint ###################################################### +#################################################################### + +- name: entrypoint + docker_container: + image: "{{ docker_test_image_alpine }}" + entrypoint: + - /bin/sh + - "-v" + - "-c" + - "'sleep 10m'" + name: "{{ cname }}" + state: started + register: entrypoint_1 + +- name: entrypoint (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + entrypoint: + - /bin/sh + - "-v" + - "-c" + - "'sleep 10m'" + name: "{{ cname }}" + state: started + register: entrypoint_2 + +- name: entrypoint (change order, should not be idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + entrypoint: + - /bin/sh + - "-c" + - "'sleep 10m'" + - "-v" + name: "{{ cname }}" + state: started + force_kill: yes + register: entrypoint_3 + +- name: entrypoint (less parameters) + docker_container: + image: "{{ docker_test_image_alpine }}" + entrypoint: + - /bin/sh + - "-c" + - "'sleep 10m'" + name: "{{ cname }}" + state: started + force_kill: yes + register: entrypoint_4 + +- name: entrypoint (other parameters) + docker_container: + image: "{{ docker_test_image_alpine }}" + entrypoint: + - /bin/sh + - "-c" + - "'sleep 5m'" + name: "{{ cname }}" + state: started + force_kill: yes + register: entrypoint_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - entrypoint_1 is changed + - entrypoint_2 is not changed + - entrypoint_3 is changed + - entrypoint_4 is changed + - entrypoint_5 is changed + +#################################################################### +## env ############################################################# 
+#################################################################### + +- name: env + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env: + TEST1: val1 + TEST2: val2 + TEST3: "False" + TEST4: "true" + TEST5: "yes" + register: env_1 + +- name: env (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env: + TEST2: val2 + TEST1: val1 + TEST5: "yes" + TEST3: "False" + TEST4: "true" + register: env_2 + +- name: env (less environment variables) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env: + TEST1: val1 + register: env_3 + +- name: env (more environment variables) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env: + TEST1: val1 + TEST3: val3 + force_kill: yes + register: env_4 + +- name: env (fail unwrapped values) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env: + TEST1: true + force_kill: yes + register: env_5 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - env_1 is changed + - env_2 is not changed + - env_3 is not changed + - env_4 is changed + - env_5 is failed + - "('Non-string value found for env option.') in env_5.msg" + +#################################################################### +## env_file ######################################################### +#################################################################### + +- name: env_file + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env_file: "{{ role_path }}/files/env-file" + register: env_file_1 + +- name: env_file (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + env_file: "{{ role_path }}/files/env-file" + register: env_file_2 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - env_file_1 is changed + - env_file_2 is not changed + +#################################################################### +## etc_hosts ####################################################### +#################################################################### + +- name: etc_hosts + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + etc_hosts: + example.com: 1.2.3.4 + example.org: 4.3.2.1 + register: etc_hosts_1 + +- name: etc_hosts (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + etc_hosts: + example.org: 4.3.2.1 + example.com: 1.2.3.4 + register: etc_hosts_2 + +- name: etc_hosts (less hosts) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + etc_hosts: + example.com: 1.2.3.4 + register: etc_hosts_3 + +- name: etc_hosts (more hosts) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + 
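# (etc_hosts entries become ExtraHosts lines in the container's /etc/hosts; + # since they are create-time configuration, adding one means the container + # must be recreated, hence force_kill below.) +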
name: "{{ cname }}" + state: started + etc_hosts: + example.com: 1.2.3.4 + example.us: 1.2.3.5 + force_kill: yes + register: etc_hosts_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - etc_hosts_1 is changed + - etc_hosts_2 is not changed + - etc_hosts_3 is not changed + - etc_hosts_4 is changed + +#################################################################### +## exposed_ports ################################################### +#################################################################### + +- name: exposed_ports + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9002" + register: exposed_ports_1 + +- name: exposed_ports (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9002" + - "9001" + register: exposed_ports_2 + +- name: exposed_ports (less ports) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9002" + register: exposed_ports_3 + +- name: exposed_ports (more ports) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9002" + - "9003" + force_kill: yes + register: exposed_ports_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - exposed_ports_1 is changed + - exposed_ports_2 is not changed + - exposed_ports_3 is not changed + - exposed_ports_4 is changed + +#################################################################### +## force_kill ###################################################### +#################################################################### + +# TODO: - force_kill + +#################################################################### +## groups ########################################################## +#################################################################### + +- name: groups + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1234" + - "5678" + register: groups_1 + +- name: groups (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "5678" + - "1234" + register: groups_2 + +- name: groups (less groups) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1234" + register: groups_3 + +- name: groups (more groups) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + groups: + - "1234" + - "2345" + force_kill: yes + register: groups_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - groups_1 is changed + - groups_2 is not changed + - groups_3 is not changed + - groups_4 is changed + +#################################################################### +## healthcheck ##################################################### 
+#################################################################### + +- name: healthcheck + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: + - CMD + - sleep + - 1 + timeout: 2s + interval: 0h0m2s3ms4us + retries: 2 + force_kill: yes + register: healthcheck_1 + ignore_errors: yes + +- name: healthcheck (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: + - CMD + - sleep + - 1 + timeout: 2s + interval: 0h0m2s3ms4us + retries: 2 + force_kill: yes + register: healthcheck_2 + ignore_errors: yes + +- name: healthcheck (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: + - CMD + - sleep + - 1 + timeout: 3s + interval: 0h1m2s3ms4us + retries: 3 + force_kill: yes + register: healthcheck_3 + ignore_errors: yes + +- name: healthcheck (no change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + force_kill: yes + register: healthcheck_4 + ignore_errors: yes + +- name: healthcheck (disabled) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: + - NONE + force_kill: yes + register: healthcheck_5 + ignore_errors: yes + +- name: healthcheck (disabled, idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: + - NONE + force_kill: yes + register: healthcheck_6 + ignore_errors: yes + +- name: healthcheck (string in healthcheck test, changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: "sleep 1" + force_kill: yes + register: healthcheck_7 + ignore_errors: yes + +- name: healthcheck (string in healthcheck test, idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + healthcheck: + test: "sleep 1" + force_kill: yes + register: healthcheck_8 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - healthcheck_1 is changed + - healthcheck_2 is not changed + - healthcheck_3 is changed + - healthcheck_4 is not changed + - healthcheck_5 is changed + - healthcheck_6 is not changed + - healthcheck_7 is changed + - healthcheck_8 is not changed + when: docker_py_version is version('2.0.0', '>=') +- assert: + that: + - healthcheck_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in healthcheck_1.msg" + - "'Minimum version required is 2.0.0 ' in healthcheck_1.msg" + when: docker_py_version is version('2.0.0', '<') + +#################################################################### +## hostname ######################################################## +#################################################################### + +- name: hostname + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + hostname: me.example.com + state: started + register: hostname_1 + +- name: hostname 
(idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + hostname: me.example.com + state: started + register: hostname_2 + +- name: hostname (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + hostname: me.example.org + state: started + force_kill: yes + register: hostname_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - hostname_1 is changed + - hostname_2 is not changed + - hostname_3 is changed + +#################################################################### +## init ############################################################ +#################################################################### + +- name: init + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + init: yes + state: started + register: init_1 + ignore_errors: yes + +- name: init (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + init: yes + state: started + register: init_2 + ignore_errors: yes + +- name: init (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + init: no + state: started + force_kill: yes + register: init_3 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - init_1 is changed + - init_2 is not changed + - init_3 is changed + when: docker_py_version is version('2.2.0', '>=') +- assert: + that: + - init_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in init_1.msg" + - "'Minimum version required is 2.2.0 ' in init_1.msg" + when: docker_py_version is version('2.2.0', '<') + +#################################################################### +## interactive ##################################################### +#################################################################### + +- name: interactive + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + interactive: yes + state: started + register: interactive_1 + +- name: interactive (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + interactive: yes + state: started + register: interactive_2 + +- name: interactive (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + interactive: no + state: started + force_kill: yes + register: interactive_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - interactive_1 is changed + - interactive_2 is not changed + - interactive_3 is changed + +#################################################################### +## image / ignore_image ############################################ +#################################################################### + +- name: Pull {{ docker_test_image_hello_world }} image to make sure ignore_image test succeeds + # If the image isn't there, it will pull it and return 'changed'. 
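+ # (With source=pull and no force_source, an image that is already present + # is left alone, so re-running this preparation task stays idempotent.)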
+ docker_image: + name: "{{ docker_test_image_hello_world }}" + source: pull + +- name: image + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + register: image_1 + +- name: image (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + register: image_2 + +- name: ignore_image + docker_container: + image: "{{ docker_test_image_hello_world }}" + ignore_image: yes + name: "{{ cname }}" + state: started + register: ignore_image + +- name: image change + docker_container: + image: "{{ docker_test_image_hello_world }}" + name: "{{ cname }}" + state: started + force_kill: yes + register: image_change + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - image_1 is changed + - image_2 is not changed + - ignore_image is not changed + - image_change is changed + +#################################################################### +## ipc_mode ######################################################## +#################################################################### + +- name: start helpers + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ container_name }}" + state: started + ipc_mode: shareable + loop: + - "{{ cname_h1 }}" + loop_control: + loop_var: container_name + +- name: ipc_mode + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ipc_mode: "container:{{ cname_h1 }}" + # ipc_mode: shareable + register: ipc_mode_1 + +- name: ipc_mode (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ipc_mode: "container:{{ cname_h1 }}" + # ipc_mode: shareable + register: ipc_mode_2 + +- name: ipc_mode (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ipc_mode: private + force_kill: yes + register: ipc_mode_3 + +- name: cleanup + docker_container: + name: "{{ container_name }}" + state: absent + force_kill: yes + loop: + - "{{ cname }}" + - "{{ cname_h1 }}" + loop_control: + loop_var: container_name + diff: no + +- assert: + that: + - ipc_mode_1 is changed + - ipc_mode_2 is not changed + - ipc_mode_3 is changed + +#################################################################### +## kernel_memory ################################################### +#################################################################### + +- name: kernel_memory + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + kernel_memory: 8M + state: started + register: kernel_memory_1 + ignore_errors: yes + +- name: kernel_memory (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + kernel_memory: 8M + state: started + register: kernel_memory_2 + ignore_errors: yes + +- name: kernel_memory (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + kernel_memory: 6M + state: started + force_kill: yes + register: kernel_memory_3 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: 
absent + force_kill: yes + diff: no + ignore_errors: yes + +- assert: + that: + - kernel_memory_1 is changed + - kernel_memory_2 is not changed + - kernel_memory_3 is changed + when: kernel_memory_1 is not failed or 'kernel memory accounting disabled in this runc build' not in kernel_memory_1.msg + +#################################################################### +## kill_signal ##################################################### +#################################################################### + +# TODO: - kill_signal + +#################################################################### +## labels ########################################################## +#################################################################### + +- name: labels + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.2: world + register: labels_1 + +- name: labels (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.2: world + ansible.test.1: hello + register: labels_2 + +- name: labels (less labels) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + register: labels_3 + +- name: labels (more labels) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + labels: + ansible.test.1: hello + ansible.test.3: ansible + force_kill: yes + register: labels_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - labels_1 is changed + - labels_2 is not changed + - labels_3 is not changed + - labels_4 is changed + +#################################################################### +## links ########################################################### +#################################################################### + +- name: start helpers + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ container_name }}" + state: started + loop: + - "{{ cname_h1 }}" + - "{{ cname_h2 }}" + - "{{ cname_h3 }}" + loop_control: + loop_var: container_name + +- name: links + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + links: + - "{{ cname_h1 }}:test1" + - "{{ cname_h2 }}:test2" + register: links_1 + +- name: links (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + links: + - "{{ cname_h2 }}:test2" + - "{{ cname_h1 }}:test1" + register: links_2 + +- name: links (less links) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + links: + - "{{ cname_h1 }}:test1" + register: links_3 + +- name: links (more links) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + links: + - "{{ cname_h1 }}:test1" + - "{{ cname_h3 }}:test3" + force_kill: yes + register: links_4 + +- name: cleanup + docker_container: + name: "{{ container_name }}" + state: absent + force_kill: yes + loop: + - "{{ cname 
}}" + - "{{ cname_h1 }}" + - "{{ cname_h2 }}" + - "{{ cname_h3 }}" + loop_control: + loop_var: container_name + diff: no + +- assert: + that: + - links_1 is changed + - links_2 is not changed + - links_3 is not changed + - links_4 is changed + +#################################################################### +## log_driver ###################################################### +#################################################################### + +- name: log_driver + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: json-file + register: log_driver_1 + +- name: log_driver (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: json-file + register: log_driver_2 + +- name: log_driver (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: syslog + force_kill: yes + register: log_driver_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - log_driver_1 is changed + - log_driver_2 is not changed + - log_driver_3 is changed + +#################################################################### +## log_options ##################################################### +#################################################################### + +- name: log_options + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: json-file + log_options: + labels: production_status + env: os,customer + max-file: 5 + register: log_options_1 + +- name: log_options (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: json-file + log_options: + env: os,customer + labels: production_status + max-file: 5 + register: log_options_2 + +- name: log_options (less log options) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: json-file + log_options: + labels: production_status + register: log_options_3 + +- name: log_options (more log options) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + log_driver: json-file + log_options: + labels: production_status + max-size: 10m + force_kill: yes + register: log_options_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - log_options_1 is changed + - log_options_2 is not changed + - "'Non-string value found for log_options option \\'max-file\\'. The value is automatically converted to \\'5\\'. If this is not correct, or you want to +avoid such warnings, please quote the value.' 
in log_options_2.warnings" + - log_options_3 is not changed + - log_options_4 is changed + +#################################################################### +## mac_address ##################################################### +#################################################################### + +- name: mac_address + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + mac_address: 92:d0:c6:0a:29:33 + state: started + register: mac_address_1 + +- name: mac_address (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + mac_address: 92:d0:c6:0a:29:33 + state: started + register: mac_address_2 + +- name: mac_address (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + mac_address: 92:d0:c6:0a:29:44 + state: started + force_kill: yes + register: mac_address_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - mac_address_1 is changed + - mac_address_2 is not changed + - mac_address_3 is changed + +#################################################################### +## memory ########################################################## +#################################################################### + +- name: memory + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory: 64M + state: started + register: memory_1 + +- name: memory (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory: 64M + state: started + register: memory_2 + +- name: memory (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory: 48M + state: started + force_kill: yes + register: memory_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - memory_1 is changed + - memory_2 is not changed + - memory_3 is changed + +#################################################################### +## memory_reservation ############################################## +#################################################################### + +- name: memory_reservation + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory_reservation: 64M + state: started + register: memory_reservation_1 + +- name: memory_reservation (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory_reservation: 64M + state: started + register: memory_reservation_2 + +- name: memory_reservation (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory_reservation: 48M + state: started + force_kill: yes + register: memory_reservation_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - memory_reservation_1 is changed + - memory_reservation_2 is not changed + - memory_reservation_3 is changed + +#################################################################### +## memory_swap ##################################################### 
+#################################################################### + +- name: memory_swap + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + # Docker daemon does not accept memory_swap if memory is not specified + memory: 32M + memory_swap: 64M + state: started + debug: yes + register: memory_swap_1 + +- name: memory_swap (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + # Docker daemon does not accept memory_swap if memory is not specified + memory: 32M + memory_swap: 64M + state: started + debug: yes + register: memory_swap_2 + +- name: memory_swap (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + # Docker daemon does not accept memory_swap if memory is not specified + memory: 32M + memory_swap: 48M + state: started + force_kill: yes + debug: yes + register: memory_swap_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - memory_swap_1 is changed + # Sometimes (in particular during integration tests, maybe when not running + # on a proper VM), memory_swap cannot be set and will be -1 afterwards. + - memory_swap_2 is not changed or memory_swap_2.container.HostConfig.MemorySwap == -1 + - memory_swap_3 is changed + +- debug: var=memory_swap_1 + when: memory_swap_2 is changed +- debug: var=memory_swap_2 + when: memory_swap_2 is changed +- debug: var=memory_swap_3 + when: memory_swap_2 is changed + +#################################################################### +## memory_swappiness ############################################### +#################################################################### + +- name: memory_swappiness + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory_swappiness: 40 + state: started + register: memory_swappiness_1 + +- name: memory_swappiness (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory_swappiness: 40 + state: started + register: memory_swappiness_2 + +- name: memory_swappiness (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + memory_swappiness: 60 + state: started + force_kill: yes + register: memory_swappiness_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - memory_swappiness_1 is changed + - memory_swappiness_2 is not changed + - memory_swappiness_3 is changed + +#################################################################### +## oom_killer ###################################################### +#################################################################### + +- name: oom_killer + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + oom_killer: yes + state: started + register: oom_killer_1 + +- name: oom_killer (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + oom_killer: yes + state: started + register: oom_killer_2 + +- name: oom_killer (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + 
name: "{{ cname }}" + oom_killer: no + state: started + force_kill: yes + register: oom_killer_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - oom_killer_1 is changed + - oom_killer_2 is not changed + - oom_killer_3 is changed + +#################################################################### +## oom_score_adj ################################################### +#################################################################### + +- name: oom_score_adj + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + oom_score_adj: 5 + state: started + register: oom_score_adj_1 + +- name: oom_score_adj (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + oom_score_adj: 5 + state: started + register: oom_score_adj_2 + +- name: oom_score_adj (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + oom_score_adj: 7 + state: started + force_kill: yes + register: oom_score_adj_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - oom_score_adj_1 is changed + - oom_score_adj_2 is not changed + - oom_score_adj_3 is changed + +#################################################################### +## output_logs ##################################################### +#################################################################### + +# TODO: - output_logs + +#################################################################### +## paused ########################################################## +#################################################################### + +- name: paused + docker_container: + image: "{{ docker_test_image_alpine }}" + command: "/bin/sh -c 'sleep 10m'" + name: "{{ cname }}" + state: started + paused: yes + force_kill: yes + register: paused_1 + +- name: inspect paused + command: "docker inspect -f {% raw %}'{{.State.Status}} {{.State.Paused}}'{% endraw %} {{ cname }}" + register: paused_2 + +- name: paused (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: "/bin/sh -c 'sleep 10m'" + name: "{{ cname }}" + state: started + paused: yes + force_kill: yes + register: paused_3 + +- name: paused (continue) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: "/bin/sh -c 'sleep 10m'" + name: "{{ cname }}" + state: started + paused: no + force_kill: yes + register: paused_4 + +- name: inspect paused + command: "docker inspect -f {% raw %}'{{.State.Status}} {{.State.Paused}}'{% endraw %} {{ cname }}" + register: paused_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - paused_1 is changed + - 'paused_2.stdout == "paused true"' + - paused_3 is not changed + - paused_4 is changed + - 'paused_5.stdout == "running false"' + +#################################################################### +## pid_mode ######################################################## +#################################################################### + +- name: start helpers + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname_h1 }}" + state: started + register: pid_mode_helper + +- name: pid_mode + docker_container: + image: "{{ 
docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + pid_mode: "container:{{ pid_mode_helper.container.Id }}" + register: pid_mode_1 + ignore_errors: yes + # docker-py < 2.0 does not support "arbitrary" pid_mode values + +- name: pid_mode (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + pid_mode: "container:{{ cname_h1 }}" + register: pid_mode_2 + ignore_errors: yes + # docker-py < 2.0 does not support "arbitrary" pid_mode values + +- name: pid_mode (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + pid_mode: host + force_kill: yes + register: pid_mode_3 + +- name: cleanup + docker_container: + name: "{{ container_name }}" + state: absent + force_kill: yes + loop: + - "{{ cname }}" + - "{{ cname_h1 }}" + loop_control: + loop_var: container_name + diff: no + +- assert: + that: + - pid_mode_1 is changed + - pid_mode_2 is not changed + - pid_mode_3 is changed + when: docker_py_version is version('2.0.0', '>=') +- assert: + that: + - pid_mode_1 is failed + - pid_mode_2 is failed + - pid_mode_3 is changed + when: docker_py_version is version('2.0.0', '<') + +#################################################################### +## pids_limit ###################################################### +#################################################################### + +- name: pids_limit + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + pids_limit: 10 + register: pids_limit_1 + ignore_errors: yes + +- name: pids_limit (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + pids_limit: 10 + register: pids_limit_2 + ignore_errors: yes + +- name: pids_limit (changed) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + pids_limit: 20 + force_kill: yes + register: pids_limit_3 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - pids_limit_1 is changed + - pids_limit_2 is not changed + - pids_limit_3 is changed + when: docker_py_version is version('1.10.0', '>=') +- assert: + that: + - pids_limit_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in pids_limit_1.msg" + - "'Minimum version required is 1.10.0 ' in pids_limit_1.msg" + when: docker_py_version is version('1.10.0', '<') + +#################################################################### +## privileged ###################################################### +#################################################################### + +- name: privileged + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + privileged: yes + state: started + register: privileged_1 + +- name: privileged (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + privileged: yes + state: started + register: privileged_2 + +- name: privileged (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + privileged: no + state: started 
+ force_kill: yes + register: privileged_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - privileged_1 is changed + - privileged_2 is not changed + - privileged_3 is changed + +#################################################################### +## published_ports and default_host_ip ############################# +#################################################################### + +- name: published_ports + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9001' + - '9002' + register: published_ports_1 + +- name: published_ports (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9002' + - '9001' + register: published_ports_2 + +- name: published_ports (less published_ports) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9002' + register: published_ports_3 + +- name: published_ports (more published_ports) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9002' + - '9003' + force_kill: yes + register: published_ports_4 + +- name: published_ports (ports with IP addresses) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '127.0.0.1:9002:9002/tcp' + - '[::1]:9003:9003/tcp' + - '[fe80::1%test]:90:90/tcp' + force_kill: yes + register: published_ports_5 + +- name: published_ports (ports with IP addresses, idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '127.0.0.1:9002:9002/tcp' + - '[::1]:9003:9003/tcp' + - '[fe80::1%test]:90:90/tcp' + register: published_ports_6 + +- name: published_ports (no published ports) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: [] + comparisons: + published_ports: strict + force_kill: yes + register: published_ports_7 + +- name: published_ports (default_host_ip not set) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9001' + - '9002' + force_kill: yes + register: published_ports_8 + +- name: published_ports (default_host_ip set to empty string) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9002' + - '9001' + default_host_ip: '' + force_kill: yes + register: published_ports_9 + +- name: published_ports (default_host_ip set to empty string, idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - '9002' + - '9001' + default_host_ip: '' + register: published_ports_10 + +- name: published_ports (default_host_ip unset) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + 
published_ports: + - '9002' + - '9001' + force_kill: yes + register: published_ports_11 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - published_ports_1 is changed + - published_ports_2 is not changed + - published_ports_3 is not changed + - published_ports_4 is changed + - published_ports_5 is changed + - published_ports_6 is not changed + - published_ports_7 is changed + - published_ports_8 is changed + - published_ports_9 is changed + - published_ports_10 is not changed + - published_ports_11 is changed + +#################################################################### +## pull ############################################################ +#################################################################### + +# TODO: - pull + +#################################################################### +## read_only ####################################################### +#################################################################### + +- name: read_only + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + read_only: yes + state: started + register: read_only_1 + +- name: read_only (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + read_only: yes + state: started + register: read_only_2 + +- name: read_only (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + read_only: no + state: started + force_kill: yes + register: read_only_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - read_only_1 is changed + - read_only_2 is not changed + - read_only_3 is changed + +#################################################################### +## restart_policy ################################################## +#################################################################### + +- name: restart_policy + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart_policy: always + state: started + register: restart_policy_1 + +- name: restart_policy (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart_policy: always + state: started + register: restart_policy_2 + +- name: restart_policy (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart_policy: unless-stopped + state: started + force_kill: yes + register: restart_policy_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - restart_policy_1 is changed + - restart_policy_2 is not changed + - restart_policy_3 is changed + +#################################################################### +## restart_retries ################################################# +#################################################################### + +- name: restart_retries + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart_policy: on-failure + restart_retries: 5 + state: started + register: restart_retries_1 + +- name: restart_retries (idempotency) + docker_container: + image: 
"{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart_policy: on-failure + restart_retries: 5 + state: started + register: restart_retries_2 + +- name: restart_retries (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart_policy: on-failure + restart_retries: 2 + state: started + force_kill: yes + register: restart_retries_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - restart_retries_1 is changed + - restart_retries_2 is not changed + - restart_retries_3 is changed + +#################################################################### +## runtime ######################################################### +#################################################################### + +- name: runtime + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + runtime: runc + state: started + register: runtime_1 + ignore_errors: yes + +- name: runtime (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + runtime: runc + state: started + register: runtime_2 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - runtime_1 is changed + - runtime_2 is not changed + when: docker_py_version is version('2.4.0', '>=') +- assert: + that: + - runtime_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in runtime_1.msg" + - "'Minimum version required is 2.4.0 ' in runtime_1.msg" + when: docker_py_version is version('2.4.0', '<') + +#################################################################### +## security_opts ################################################### +#################################################################### + +# In case some of the options stop working, here are some more +# options which *currently* work with all integration test targets: +# no-new-privileges +# label:disable +# label=disable +# label:level:s0:c100,c200 +# label=level:s0:c100,c200 +# label:type:svirt_apache_t +# label=type:svirt_apache_t +# label:user:root +# label=user:root +# seccomp:unconfined +# seccomp=unconfined +# apparmor:docker-default +# apparmor=docker-default + +- name: security_opts + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + security_opts: + - "label:level:s0:c100,c200" + - "no-new-privileges" + register: security_opts_1 + +- name: security_opts (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + security_opts: + - "no-new-privileges" + - "label:level:s0:c100,c200" + register: security_opts_2 + +- name: security_opts (less security options) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + security_opts: + - "no-new-privileges" + register: security_opts_3 + +- name: security_opts (more security options) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + security_opts: + - "label:disable" + - "no-new-privileges" + force_kill: yes + register: security_opts_4 + +- name: cleanup 
+ docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - security_opts_1 is changed + - security_opts_2 is not changed + - security_opts_3 is not changed + - security_opts_4 is changed + +#################################################################### +## shm_size ######################################################## +#################################################################### + +- name: shm_size + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + shm_size: 96M + state: started + register: shm_size_1 + +- name: shm_size (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + shm_size: 96M + state: started + register: shm_size_2 + +- name: shm_size (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + shm_size: 75M + state: started + force_kill: yes + register: shm_size_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - shm_size_1 is changed + - shm_size_2 is not changed + - shm_size_3 is changed + +#################################################################### +## stop_signal ##################################################### +#################################################################### + +- name: stop_signal + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + stop_signal: "30" + state: started + register: stop_signal_1 + +- name: stop_signal (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + stop_signal: "30" + state: started + register: stop_signal_2 + +- name: stop_signal (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + stop_signal: "9" + state: started + force_kill: yes + register: stop_signal_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - stop_signal_1 is changed + - stop_signal_2 is not changed + - stop_signal_3 is changed + +#################################################################### +## stop_timeout #################################################### +#################################################################### + +- name: stop_timeout + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + stop_timeout: 2 + state: started + register: stop_timeout_1 + +- name: stop_timeout (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + stop_timeout: 2 + state: started + register: stop_timeout_2 + +- name: stop_timeout (no change) + # stop_timeout changes are ignored by default + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + stop_timeout: 1 + state: started + register: stop_timeout_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - stop_timeout_1 is changed + - stop_timeout_2 is not changed + - stop_timeout_3 is not changed + 
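+# Note: stop_timeout has comparison mode 'ignore' by default, which is why
+# stop_timeout_3 above is 'not changed'. A change could presumably be enforced
+# via the comparisons option (used with 'strict' for published_ports elsewhere
+# in this file), along these lines:
+#
+#   - docker_container:
+#       image: "{{ docker_test_image_alpine }}"
+#       command: '/bin/sh -c "sleep 10m"'
+#       name: "{{ cname }}"
+#       stop_timeout: 1
+#       state: started
+#       comparisons:
+#         stop_timeout: strict
+#       force_kill: yes
+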
+#################################################################### +## sysctls ######################################################### +#################################################################### + +# In case some of the options stop working, here are some more +# options which *currently* work with all integration test targets: +# net.ipv4.conf.default.log_martians: 1 +# net.ipv4.conf.default.secure_redirects: 0 +# net.ipv4.conf.default.send_redirects: 0 +# net.ipv4.conf.all.log_martians: 1 +# net.ipv4.conf.all.accept_redirects: 0 +# net.ipv4.conf.all.secure_redirects: 0 +# net.ipv4.conf.all.send_redirects: 0 + +- name: sysctls + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + sysctls: + net.ipv4.icmp_echo_ignore_all: 1 + net.ipv4.ip_forward: 1 + register: sysctls_1 + ignore_errors: yes + +- name: sysctls (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + sysctls: + net.ipv4.ip_forward: 1 + net.ipv4.icmp_echo_ignore_all: 1 + register: sysctls_2 + ignore_errors: yes + +- name: sysctls (less sysctls) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + sysctls: + net.ipv4.icmp_echo_ignore_all: 1 + register: sysctls_3 + ignore_errors: yes + +- name: sysctls (more sysctls) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + sysctls: + net.ipv4.icmp_echo_ignore_all: 1 + net.ipv6.conf.default.accept_redirects: 0 + force_kill: yes + register: sysctls_4 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - sysctls_1 is changed + - sysctls_2 is not changed + - sysctls_3 is not changed + - sysctls_4 is changed + when: docker_py_version is version('1.10.0', '>=') +- assert: + that: + - sysctls_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in sysctls_1.msg" + - "'Minimum version required is 1.10.0 ' in sysctls_1.msg" + when: docker_py_version is version('1.10.0', '<') + +#################################################################### +## tmpfs ########################################################### +#################################################################### + +- name: tmpfs + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + tmpfs: + - "/test1:rw,noexec,nosuid,size=65536k" + - "/test2:rw,noexec,nosuid,size=65536k" + register: tmpfs_1 + +- name: tmpfs (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + tmpfs: + - "/test2:rw,noexec,nosuid,size=65536k" + - "/test1:rw,noexec,nosuid,size=65536k" + register: tmpfs_2 + +- name: tmpfs (less tmpfs) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + tmpfs: + - "/test1:rw,noexec,nosuid,size=65536k" + register: tmpfs_3 + +- name: tmpfs (more tmpfs) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + tmpfs: + - "/test1:rw,noexec,nosuid,size=65536k" + - "/test3:rw,noexec,nosuid,size=65536k" + force_kill: yes + 
register: tmpfs_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - tmpfs_1 is changed + - tmpfs_2 is not changed + - tmpfs_3 is not changed + - tmpfs_4 is changed + +#################################################################### +## tty ############################################################# +#################################################################### + +- name: tty + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + tty: yes + state: started + register: tty_1 + +- name: tty (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + tty: yes + state: started + register: tty_2 + +- name: tty (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + tty: no + state: started + force_kill: yes + register: tty_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - tty_1 is changed + - tty_2 is not changed + - tty_3 is changed + +#################################################################### +## ulimits ######################################################### +#################################################################### + +- name: ulimits + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ulimits: + - "nofile:1234:1234" + - "nproc:3:6" + register: ulimits_1 + +- name: ulimits (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ulimits: + - "nproc:3:6" + - "nofile:1234:1234" + register: ulimits_2 + +- name: ulimits (less ulimits) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ulimits: + - "nofile:1234:1234" + register: ulimits_3 + +- name: ulimits (more ulimits) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + ulimits: + - "nofile:1234:1234" + - "sigpending:100:200" + force_kill: yes + register: ulimits_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - ulimits_1 is changed + - ulimits_2 is not changed + - ulimits_3 is not changed + - ulimits_4 is changed + +#################################################################### +## user ############################################################ +#################################################################### + +- name: user + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + user: nobody + state: started + register: user_1 + +- name: user (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + user: nobody + state: started + register: user_2 + +- name: user (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + user: root + state: started + force_kill: yes + register: user_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes 
+ diff: no + +- assert: + that: + - user_1 is changed + - user_2 is not changed + - user_3 is changed + +#################################################################### +## userns_mode ##################################################### +#################################################################### + +- name: userns_mode + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + userns_mode: host + state: started + register: userns_mode_1 + ignore_errors: yes + +- name: userns_mode (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + userns_mode: host + state: started + register: userns_mode_2 + ignore_errors: yes + +- name: userns_mode (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + userns_mode: "" + state: started + force_kill: yes + register: userns_mode_3 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - userns_mode_1 is changed + - userns_mode_2 is not changed + - userns_mode_3 is changed + when: docker_py_version is version('1.10.0', '>=') +- assert: + that: + - userns_mode_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in userns_mode_1.msg" + - "'Minimum version required is 1.10.0 ' in userns_mode_1.msg" + when: docker_py_version is version('1.10.0', '<') + +#################################################################### +## uts ############################################################# +#################################################################### + +- name: uts + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + uts: host + state: started + register: uts_1 + ignore_errors: yes + +- name: uts (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + uts: host + state: started + register: uts_2 + ignore_errors: yes + +- name: uts (change) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + uts: "" + state: started + force_kill: yes + register: uts_3 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - uts_1 is changed + - uts_2 is not changed + - uts_3 is changed + when: docker_py_version is version('3.5.0', '>=') +- assert: + that: + - uts_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in uts_1.msg" + - "'Minimum version required is 3.5.0 ' in uts_1.msg" + when: docker_py_version is version('3.5.0', '<') + +#################################################################### +## working_dir ##################################################### +#################################################################### + +- name: working_dir + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + working_dir: /tmp + state: started + register: working_dir_1 + +- name: working_dir (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + working_dir: /tmp + state: started + register: working_dir_2 + +- name: working_dir (change) + docker_container: + image: "{{ 
docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + working_dir: / + state: started + force_kill: yes + register: working_dir_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - working_dir_1 is changed + - working_dir_2 is not changed + - working_dir_3 is changed + +#################################################################### +#################################################################### +#################################################################### diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml new file mode 100644 index 00000000..895cd236 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml @@ -0,0 +1,286 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-options' }}" + cname2: "{{ cname_prefix ~ '-options-h1' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname, cname2] }}" + +#################################################################### +## published_ports: all ############################################ +#################################################################### + +- name: published_ports -- all + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9002" + published_ports: + - all + force_kill: yes + register: published_ports_1 + +- name: published_ports -- all (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9002" + published_ports: + - all + force_kill: yes + register: published_ports_2 + +- name: published_ports -- all (writing out 'all') + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9002" + published_ports: + - "9001" + - "9002" + force_kill: yes + register: published_ports_3 + +- name: published_ports -- all (idempotency 2) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9002" + published_ports: + - "9002" + - "9001" + force_kill: yes + register: published_ports_4 + +- name: published_ports -- all (switching back to 'all') + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9002" + published_ports: + - all + force_kill: yes + register: published_ports_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - published_ports_1 is changed + - published_ports_2 is not changed + - published_ports_3 is changed + - published_ports_4 is not changed + - published_ports_5 is changed + +#################################################################### +## published_ports: port range ##################################### 
+#################################################################### + +- name: published_ports -- port range + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9010-9050" + published_ports: + - "9001:9001" + - "9010-9050:9010-9050" + force_kill: yes + register: published_ports_1 + +- name: published_ports -- port range (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9010-9050" + published_ports: + - "9001:9001" + - "9010-9050:9010-9050" + force_kill: yes + register: published_ports_2 + +- name: published_ports -- port range (different range) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + exposed_ports: + - "9001" + - "9010-9050" + published_ports: + - "9001:9001" + - "9020-9060:9020-9060" + force_kill: yes + register: published_ports_3 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - published_ports_1 is changed + - published_ports_2 is not changed + - published_ports_3 is changed + +#################################################################### +## published_ports: one-element container port range ############### +#################################################################### + +- name: published_ports -- one-element container port range + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ item }}" + state: started + published_ports: + - "9010-9050:9010" + force_kill: yes + loop: + - '{{ cname }}' + - '{{ cname2 }}' + register: published_ports_1 + +- name: published_ports -- one-element container port range (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ item }}" + state: started + published_ports: + - "9010-9050:9010" + force_kill: yes + loop: + - '{{ cname }}' + - '{{ cname2 }}' + register: published_ports_2 + +- name: published_ports -- one-element container port range (different range) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ item }}" + state: started + published_ports: + - "9010-9051:9010" + force_kill: yes + loop: + - '{{ cname }}' + - '{{ cname2 }}' + register: published_ports_3 + +- name: cleanup + docker_container: + name: "{{ item }}" + state: absent + force_kill: yes + loop: + - '{{ cname }}' + - '{{ cname2 }}' + diff: no + +- assert: + that: + - published_ports_1 is changed + - published_ports_2 is not changed + - published_ports_3 is changed + +#################################################################### +## published_ports: IPv6 addresses ################################# +#################################################################### + +- name: published_ports -- IPv6 + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - "[::1]:9001:9001" + force_kill: yes + register: published_ports_1 + +- name: published_ports -- IPv6 (idempotency) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - "[::1]:9001:9001" + force_kill: 
yes + register: published_ports_2 + +- name: published_ports -- IPv6 (different IP) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - "127.0.0.1:9001:9001" + force_kill: yes + register: published_ports_3 + +- name: published_ports -- IPv6 (hostname) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + published_ports: + - "localhost:9001:9001" + force_kill: yes + register: published_ports_4 + ignore_errors: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - published_ports_1 is changed + - published_ports_2 is not changed + - published_ports_3 is changed + - published_ports_4 is failed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml new file mode 100644 index 00000000..d5150153 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml @@ -0,0 +1,34 @@ +--- +# Regression test for https://github.com/ansible/ansible/pull/45700 +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-45700' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +- name: Start container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + +- name: Stop container with a lot of invalid options + docker_container: + name: "{{ cname }}" + force_kill: yes + # Some options with "invalid" values, which would + # have to be parsed. The values are "invalid" because + # the containers and networks listed here do not exist. + # This can happen because the networks are removed + # before the container is stopped (see + # https://github.com/ansible/ansible/issues/45486). 
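+    # With state: absent the module must not try to interpret these values at
+    # all, so the invalid names below must not cause a failure.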
+ networks: + - name: "nonexistant-network-{{ (2**32) | random }}" + published_ports: + - '1:2' + - '3' + links: + - "nonexistant-container-{{ (2**32) | random }}:test" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml new file mode 100644 index 00000000..ec9de7c5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml @@ -0,0 +1,455 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-hi' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +#################################################################### +## Creation ######################################################## +#################################################################### + +- name: Create container (check) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + check_mode: yes + register: create_1 + +- name: Create container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + register: create_2 + +- name: Create container (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + register: create_3 + +- name: Create container (idempotent check) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + check_mode: yes + register: create_4 + +- assert: + that: + - create_1 is changed + - create_2 is changed + - create_3 is not changed + - create_4 is not changed + +#################################################################### +## Starting (after creation) ####################################### +#################################################################### + +- name: Start container (check) + docker_container: + name: "{{ cname }}" + state: started + check_mode: yes + register: start_1 + +- name: Start container + docker_container: + name: "{{ cname }}" + state: started + register: start_2 + +- name: Start container (idempotent) + docker_container: + name: "{{ cname }}" + state: started + register: start_3 + +- name: Start container (idempotent check) + docker_container: + name: "{{ cname }}" + state: started + check_mode: yes + register: start_4 + +- assert: + that: + - start_1 is changed + - start_2 is changed + - start_3 is not changed + - start_4 is not changed + +#################################################################### +## Present check for running container ############################# +#################################################################### + +- name: Present check for running container (check) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + check_mode: yes + register: present_check_1 + +- name: Present check for running container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + register: present_check_2 + +- assert: + that: + - present_check_1 is 
not changed + - present_check_2 is not changed + +#################################################################### +## Starting (from scratch) ######################################### +#################################################################### + +- name: Remove container (setup for starting from scratch) + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + +- name: Start container from scratch (check) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + stop_timeout: 1 + name: "{{ cname }}" + state: started + check_mode: yes + register: start_scratch_1 + +- name: Start container from scratch + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + stop_timeout: 1 + name: "{{ cname }}" + state: started + register: start_scratch_2 + +- name: Start container from scratch (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + stop_timeout: 1 + name: "{{ cname }}" + state: started + register: start_scratch_3 + +- name: Start container from scratch (idempotent check) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + stop_timeout: 1 + name: "{{ cname }}" + state: started + check_mode: yes + register: start_scratch_4 + +- assert: + that: + - start_scratch_1 is changed + - start_scratch_2 is changed + - start_scratch_3 is not changed + - start_scratch_4 is not changed + +#################################################################### +## Recreating ###################################################### +#################################################################### + +- name: Recreating container (created) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: present + force_kill: yes + register: recreate_1 + +- name: Recreating container (created, recreate, check mode) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + recreate: yes + state: present + force_kill: yes + register: recreate_2 + check_mode: yes + +- name: Recreating container (created, recreate) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + recreate: yes + state: present + force_kill: yes + register: recreate_3 + +- name: Recreating container (started) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + force_kill: yes + register: recreate_4 + +- name: Recreating container (started, recreate, check mode) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + recreate: yes + removal_wait_timeout: 10 + state: started + force_kill: yes + register: recreate_5 + check_mode: yes + +- name: Recreating container (started, recreate) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + recreate: yes + removal_wait_timeout: 10 + state: started + force_kill: yes + register: recreate_6 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- debug: var=recreate_1 +- debug: var=recreate_3 +- debug: var=recreate_4 +- debug: var=recreate_6 + +- assert: + that: + - recreate_2 is changed + - recreate_3 is changed + - 
recreate_4 is changed + - recreate_5 is changed + - recreate_6 is changed + - recreate_1.container.Id == recreate_2.container.Id + - recreate_1.container.Id != recreate_3.container.Id + - recreate_3.container.Id == recreate_4.container.Id + - recreate_4.container.Id == recreate_5.container.Id + - recreate_4.container.Id != recreate_6.container.Id + +#################################################################### +## Restarting ###################################################### +#################################################################### + +- name: Restarting + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + stop_timeout: 1 + volumes: + - /tmp/tmp + register: restart_1 + +- name: Restarting (restart, check mode) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart: yes + state: started + stop_timeout: 1 + force_kill: yes + register: restart_2 + check_mode: yes + +- name: Restarting (restart) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + restart: yes + state: started + stop_timeout: 1 + force_kill: yes + register: restart_3 + +- name: Restarting (verify volumes) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + stop_timeout: 1 + volumes: + - /tmp/tmp + register: restart_4 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - restart_1 is changed + - restart_2 is changed + - restart_3 is changed + - restart_1.container.Id == restart_3.container.Id + - restart_4 is not changed + +#################################################################### +## Stopping ######################################################## +#################################################################### + +- name: Stop container (check) + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: stopped + stop_timeout: 1 + check_mode: yes + register: stop_1 + +- name: Stop container + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: stopped + stop_timeout: 1 + register: stop_2 + +- name: Stop container (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: stopped + stop_timeout: 1 + register: stop_3 + +- name: Stop container (idempotent check) + docker_container: + image: "{{ docker_test_image_alpine }}" + name: "{{ cname }}" + state: stopped + stop_timeout: 1 + check_mode: yes + register: stop_4 + +- assert: + that: + - stop_1 is changed + - stop_2 is changed + - stop_3 is not changed + - stop_4 is not changed + +#################################################################### +## Removing ######################################################## +#################################################################### + +- name: Remove container (check) + docker_container: + name: "{{ cname }}" + state: absent + check_mode: yes + register: remove_1 + +- name: Remove container + docker_container: + name: "{{ cname }}" + state: absent + register: remove_2 + +- name: Remove container (idempotent) + docker_container: + name: "{{ cname }}" + state: absent + register: remove_3 + +- name: Remove container (idempotent check) + docker_container: + name: "{{ cname }}" + state: 
absent + check_mode: yes + register: remove_4 + +- assert: + that: + - remove_1 is changed + - remove_2 is changed + - remove_3 is not changed + - remove_4 is not changed + +#################################################################### +## Removing (from running) ######################################### +#################################################################### + +- name: Start container (setup for removing from running) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + +- name: Remove container from running (check) + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + check_mode: yes + register: remove_from_running_1 + +- name: Remove container from running + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + register: remove_from_running_2 + +- name: Remove container from running (idempotent) + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + register: remove_from_running_3 + +- name: Remove container from running (idempotent check) + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + check_mode: yes + register: remove_from_running_4 + +- assert: + that: + - remove_from_running_1 is changed + - remove_from_running_2 is changed + - remove_from_running_3 is not changed + - remove_from_running_4 is not changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases new file mode 100644 index 00000000..6e8edef7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml new file mode 100644 index 00000000..8ecf2de3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml @@ -0,0 +1,80 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- block: + - name: Create random container name + set_fact: + cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + + - name: Make sure container is not there + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + + - name: Inspect a non-present container + docker_container_info: + name: "{{ cname }}" + register: result + + - 
assert: + that: + - "not result.exists" + - "'container' in result" + - "result.container is none" + + - name: Make sure container exists + docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + state: started + force_kill: yes + + - name: Inspect a present container + docker_container_info: + name: "{{ cname }}" + register: result + - name: Dump docker_container_info result + debug: var=result + + - name: "Comparison: use 'docker inspect'" + command: docker inspect "{{ cname }}" + register: docker_inspect + ignore_errors: yes + - block: + - set_fact: + docker_inspect_result: "{{ docker_inspect.stdout | from_json }}" + - name: Dump docker inspect result + debug: var=docker_inspect_result + when: docker_inspect is not failed + + - assert: + that: + - result.exists + - "'container' in result" + - "result.container" + + - assert: + that: + - "result.container == docker_inspect_result[0]" + when: docker_inspect is not failed + - assert: + that: + - "'is too new. Maximum supported API version is' in docker_inspect.stderr" + when: docker_inspect is failed + + always: + - name: Cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + + when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_container_info tests!" + when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml new file mode 100644 index 00000000..1f0e10a5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml @@ -0,0 +1,10 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_host_info.yml + when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.21', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_host_info tests!" 
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.21', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml new file mode 100644 index 00000000..ef2a58a6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml @@ -0,0 +1,296 @@ +--- +- name: Create random container/volume name + set_fact: + cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + vname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + +- debug: + msg: "Using container name '{{ cname }}' and volume name '{{ vname }}'" + +- block: + - name: Get info on Docker host + docker_host_info: + register: output + + - name: assert reading docker host facts when docker is running + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + +# Container and volume are created so that all lists are non-empty: +# * container and volume lists are non-empty because of the created objects; +# * image list is non-empty because the image of the container is there; +# * network list is always non-empty (default networks). + - name: Create container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + register: container_output + + - assert: + that: + - container_output is changed + + - name: Create a volume + docker_volume: + name: "{{ vname }}" + register: volume_output + + - assert: + that: + - volume_output is changed + + - name: Get info on Docker host and list containers + docker_host_info: + containers: yes + register: output + + - name: assert reading docker host facts when docker is running and list containers + assert: + that: + - 'output.host_info.Name is string' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + - 'output.containers[0].Image is string' + - 'output.containers[0].ImageID is not defined' + + - name: Get info on Docker host and list containers with verbose output + docker_host_info: + containers: yes + verbose_output: yes + register: output + + - name: assert reading docker host facts when docker is running and list containers with verbose output + assert: + that: + - 'output.host_info.Name is string' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + - 'output.containers[0].Image is string' + - 'output.containers[0].ImageID is string' + + - name: Get info on Docker host and list images + docker_host_info: + images: yes + register: output + + - name: assert reading docker host facts when docker is running and list images + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images[0].Id is string' + - 'output.images[0].ParentId is not defined' + - 'output.disk_usage is not defined' + + - name: 
Get info on Docker host and list images with verbose output + docker_host_info: + images: yes + verbose_output: yes + register: output + + - name: assert reading docker host facts when docker is running and list images with verbose output + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images[0].Id is string' + - 'output.images[0].ParentId is string' + - 'output.disk_usage is not defined' + + - name: Get info on Docker host and list networks + docker_host_info: + networks: yes + register: output + + - name: assert reading docker host facts when docker is running and list networks + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks[0].Id is string' + - 'output.networks[0].Created is not defined' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + + - name: Get info on Docker host and list networks with verbose output + docker_host_info: + networks: yes + verbose_output: yes + register: output + + - name: assert reading docker host facts when docker is running and list networks with verbose output + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks[0].Id is string' + - 'output.networks[0].Created is string' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + + - name: Get info on Docker host and list volumes + docker_host_info: + volumes: yes + register: output + + - name: assert reading docker host facts when docker is running and list volumes + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes[0].Name is string' + - 'output.volumes[0].Mountpoint is not defined' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + + - name: Get info on Docker host and list volumes with verbose output + docker_host_info: + volumes: yes + verbose_output: yes + register: output + + - name: assert reading docker host facts when docker is running and list volumes with verbose output + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes[0].Name is string' + - 'output.volumes[0].Mountpoint is string' + - 'output.images is not defined' + - 'output.disk_usage is not defined' + + - name: Get info on Docker host and get disk usage + docker_host_info: + disk_usage: yes + register: output + ignore_errors: yes + + - name: assert reading docker host facts when docker is running and get disk usage + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage.LayersSize is number' + - 'output.disk_usage.BuilderSize is not defined' + when: docker_py_version is version('2.2.0', '>=') + - assert: + that: + - output is failed + - "('version is ' ~ docker_py_version ~ ' ') in output.msg" + - "'Minimum version required is 2.2.0 ' in output.msg" + when: docker_py_version is version('2.2.0', '<') + + - name: Get info on Docker host and get disk usage with verbose output + docker_host_info: + disk_usage: yes + verbose_output: yes + register: output + ignore_errors: yes + + - name: 
assert reading docker host facts when docker is running and get disk usage with verbose output + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers is not defined' + - 'output.networks is not defined' + - 'output.volumes is not defined' + - 'output.images is not defined' + - 'output.disk_usage.LayersSize is number' + - 'output.disk_usage.BuilderSize is number' + when: docker_py_version is version('2.2.0', '>=') + - assert: + that: + - output is failed + - "('version is ' ~ docker_py_version ~ ' ') in output.msg" + - "'Minimum version required is 2.2.0 ' in output.msg" + when: docker_py_version is version('2.2.0', '<') + + - name: Get info on Docker host, disk usage and get all lists together + docker_host_info: + volumes: yes + containers: yes + networks: yes + images: yes + disk_usage: "{{ docker_py_version is version('2.2.0', '>=') }}" + register: output + + - name: assert reading docker host facts when docker is running, disk usage and get lists together + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers[0].Image is string' + - 'output.containers[0].ImageID is not defined' + - 'output.networks[0].Id is string' + - 'output.networks[0].Created is not defined' + - 'output.volumes[0].Name is string' + - 'output.volumes[0].Mountpoint is not defined' + - 'output.images[0].Id is string' + - 'output.images[0].ParentId is not defined' + - assert: + that: + - 'output.disk_usage.LayersSize is number' + - 'output.disk_usage.BuilderSize is not defined' + when: docker_py_version is version('2.2.0', '>=') + + - name: Get info on Docker host, disk usage and get all lists together with verbose output + docker_host_info: + volumes: yes + containers: yes + networks: yes + images: yes + disk_usage: "{{ docker_py_version is version('2.2.0', '>=') }}" + verbose_output: yes + register: output + + - name: assert reading docker host facts when docker is running and get disk usage with verbose output + assert: + that: + - 'output.host_info.Name is string' + - 'output.containers[0].Image is string' + - 'output.containers[0].ImageID is string' + - 'output.networks[0].Id is string' + - 'output.networks[0].Created is string' + - 'output.volumes[0].Name is string' + - 'output.volumes[0].Mountpoint is string' + - 'output.images[0].Id is string' + - 'output.images[0].ParentId is string' + - assert: + that: + - 'output.disk_usage.LayersSize is number' + - 'output.disk_usage.BuilderSize is number' + when: docker_py_version is version('2.2.0', '>=') + + always: + - name: Delete container + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + + - name: Delete volume + docker_volume: + name: "{{ vname }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml new file mode 100644 index 00000000..21d7a58f --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker_registry diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml new file mode 100644 index 00000000..2be493eb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml @@ -0,0 +1,8 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] + include_tasks: + file: test.yml diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml new file mode 100644 index 00000000..a2999370 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml @@ -0,0 +1,3 @@ +--- +- name: "Loading tasks from {{ item }}" + include_tasks: "{{ item }}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml new file mode 100644 index 00000000..023f8ccd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml @@ -0,0 +1,49 @@ +--- +- name: Create random name prefix + set_fact: + name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" +- name: Create image and container list + set_fact: + inames: [] + cnames: [] + +- debug: + msg: "Using name prefix {{ name_prefix }}" + +- name: Create files directory + file: + path: '{{ output_dir }}/files' + state: directory + +- name: Template files + template: + src: '{{ item }}' + dest: '{{ output_dir }}/files/{{ item }}' + loop: + - Dockerfile + - EtcHostsDockerfile + - MyDockerfile + - StagedDockerfile + +- block: + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" + + always: + - name: "Make sure all images are removed" + docker_image: + name: "{{ item }}" + state: absent + with_items: "{{ inames }}" + - name: "Make sure all containers are removed" + docker_container: + name: "{{ item }}" + state: absent + force_kill: yes + with_items: "{{ cnames }}" + + when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_image tests!" 
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml new file mode 100644 index 00000000..23a0e148 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml @@ -0,0 +1,78 @@ +--- +#################################################################### +## basic ########################################################### +#################################################################### + +- name: Make sure image is not there + docker_image: + name: "{{ docker_test_image_hello_world }}" + state: absent + force_absent: yes + register: absent_1 + +- name: Make sure image is not there (idempotency) + docker_image: + name: "{{ docker_test_image_hello_world }}" + state: absent + register: absent_2 + +- assert: + that: + - absent_2 is not changed + +- name: Make sure image is there + docker_image: + name: "{{ docker_test_image_hello_world }}" + state: present + source: pull + register: present_1 + +- name: Make sure image is there (idempotent) + docker_image: + name: "{{ docker_test_image_hello_world }}" + state: present + source: pull + register: present_2 + +- assert: + that: + - present_1 is changed + - present_2 is not changed + +- name: Make sure tag is not there + docker_image: + name: "{{ docker_test_image_hello_world_base }}:alias" + state: absent + +- name: Tag image with alias + docker_image: + source: local + name: "{{ docker_test_image_hello_world }}" + repository: "{{ docker_test_image_hello_world_base }}:alias" + register: tag_1 + +- name: Tag image with alias (idempotent) + docker_image: + source: local + name: "{{ docker_test_image_hello_world }}" + repository: "{{ docker_test_image_hello_world_base }}:alias" + register: tag_2 + +- name: Tag image with alias (force, still idempotent) + docker_image: + source: local + name: "{{ docker_test_image_hello_world }}" + repository: "{{ docker_test_image_hello_world_base }}:alias" + force_tag: yes + register: tag_3 + +- assert: + that: + - tag_1 is changed + - tag_2 is not changed + - tag_3 is not changed + +- name: Cleanup alias tag + docker_image: + name: "{{ docker_test_image_hello_world_base }}:alias" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml new file mode 100644 index 00000000..f256f8d8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml @@ -0,0 +1,228 @@ +--- +- name: Registering image name + set_fact: + iname: "{{ name_prefix ~ '-options' }}" + +- name: Determining pushed image names + set_fact: + hello_world_image_base: "{{ registry_address }}/test/hello-world" + test_image_base: "{{ registry_address }}/test/{{ iname }}" + +- name: Registering image name + set_fact: + inames: "{{ inames + [iname, test_image_base ~ ':latest', hello_world_image_base ~ ':latest', hello_world_image_base ~ ':newtag', 
hello_world_image_base ~ ':newtag2'] }}" + +#################################################################### +## interact with test registry ##################################### +#################################################################### + +- name: Make sure image is not there + docker_image: + name: "{{ hello_world_image_base }}:latest" + state: absent + force_absent: yes + +- name: Make sure we have {{ docker_test_image_hello_world }} + docker_image: + name: "{{ docker_test_image_hello_world }}" + source: pull + +- name: Push image to test registry + docker_image: + name: "{{ docker_test_image_hello_world }}" + repository: "{{ hello_world_image_base }}:latest" + push: yes + source: local + register: push_1 + +- name: Push image to test registry (idempotent) + docker_image: + name: "{{ docker_test_image_hello_world }}" + repository: "{{ hello_world_image_base }}:latest" + push: yes + source: local + register: push_2 + +- name: Push image to test registry (force, still idempotent) + docker_image: + name: "{{ docker_test_image_hello_world }}" + repository: "{{ hello_world_image_base }}:latest" + push: yes + source: local + force_tag: yes + register: push_3 + +- assert: + that: + - push_1 is changed + - push_2 is not changed + - push_3 is not changed + +- name: Get facts of local image + docker_image_info: + name: "{{ hello_world_image_base }}:latest" + register: facts_1 + +- name: Make sure image is not there + docker_image: + name: "{{ hello_world_image_base }}:latest" + state: absent + force_absent: yes + +- name: Get facts of local image (absent) + docker_image_info: + name: "{{ hello_world_image_base }}:latest" + register: facts_2 + +- name: Pull image from test registry + docker_image: + name: "{{ hello_world_image_base }}:latest" + state: present + source: pull + register: pull_1 + +- name: Pull image from test registry (idempotency) + docker_image: + name: "{{ hello_world_image_base }}:latest" + state: present + source: pull + register: pull_2 + +- name: Get facts of local image (present) + docker_image_info: + name: "{{ hello_world_image_base }}:latest" + register: facts_3 + +- assert: + that: + - pull_1 is changed + - pull_2 is not changed + - facts_1.images | length == 1 + - facts_2.images | length == 0 + - facts_3.images | length == 1 + +- name: Tag different image with new tag + docker_image: + name: "{{ docker_test_image_alpine_different }}" + repository: "{{ hello_world_image_base }}:newtag" + push: no + source: pull + +- name: Push different image with new tag + docker_image: + name: "{{ hello_world_image_base }}" + repository: "{{ hello_world_image_base }}" + tag: newtag + push: yes + source: local + register: push_1_different + +- name: Push different image with new tag (idempotent) + docker_image: + name: "{{ hello_world_image_base }}" + repository: "{{ hello_world_image_base }}" + tag: newtag + push: yes + source: local + register: push_2_different + +- assert: + that: + - push_1_different is changed + - push_2_different is not changed + +- name: Tag same image with new tag + docker_image: + name: "{{ docker_test_image_alpine_different }}" + repository: "{{ hello_world_image_base }}:newtag2" + push: no + source: pull + +- name: Push same image with new tag + docker_image: + name: "{{ hello_world_image_base }}" + repository: "{{ hello_world_image_base }}" + tag: newtag2 + push: yes + source: local + register: push_1_same + +- name: Push same image with new tag (idempotent) + docker_image: + name: "{{ hello_world_image_base }}" + repository: "{{ 
hello_world_image_base }}" + tag: newtag2 + push: yes + source: local + register: push_2_same + +- assert: + that: + # NOTE: This should be: + # - push_1_same is changed + # Unfortunately docker does *NOT* report whether the tag already existed or not. + # Here are the logs returned by client.push() for both tasks (which are exactly the same): + # push_1_same: + # {"status": "The push refers to repository [localhost:32796/test/hello-world]"}, + # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Preparing"}, + # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Layer already exists"}, + # {"status": "newtag2: digest: sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b size: 528"}, + # {"aux": {"Digest": "sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b", "Size": 528, "Tag": "newtag2"}, "progressDetail": {}} + # push_2_same: + # {"status": "The push refers to repository [localhost:32796/test/hello-world]"}, + # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Preparing"}, + # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Layer already exists"}, + # {"status": "newtag2: digest: sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b size: 528"}, + # {"aux": {"Digest": "sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b", "Size": 528, "Tag": "newtag2"}, "progressDetail": {}} + - push_1_same is not changed + - push_2_same is not changed + +#################################################################### +## repository ###################################################### +#################################################################### + +- name: Make sure image is not there + docker_image: + name: "{{ test_image_base }}:latest" + state: absent + force_absent: yes + +- name: repository + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + pull: no + repository: "{{ test_image_base }}" + source: build + register: repository_1 + +- name: repository (idempotent) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + pull: no + repository: "{{ test_image_base }}" + source: build + register: repository_2 + +- assert: + that: + - repository_1 is changed + - repository_2 is not changed + +- name: Get facts of image + docker_image_info: + name: "{{ test_image_base }}:latest" + register: facts_1 + +- name: cleanup + docker_image: + name: "{{ test_image_base }}:latest" + state: absent + force_absent: yes + +- assert: + that: + - facts_1.images | length == 1 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml new file mode 100644 index 00000000..a966ea2c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml @@ -0,0 +1,389 @@ +--- +- name: Registering image name + set_fact: + iname: "{{ name_prefix ~ '-options' }}" + iname_1: "{{ name_prefix ~ '-options-1' }}" + +- name: Registering image name + set_fact: + inames: "{{ inames + [iname, iname_1] }}" + +#################################################################### +## build.args ###################################################### +#################################################################### + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + 
force_absent: yes + +- name: buildargs + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + args: + TEST1: val1 + TEST2: val2 + TEST3: "True" + pull: no + source: build + register: buildargs_1 + ignore_errors: yes + +- name: buildargs (idempotency) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + args: + TEST1: val1 + TEST2: val2 + TEST3: "True" + pull: no + source: build + register: buildargs_2 + ignore_errors: yes + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + - buildargs_1 is changed + - buildargs_2 is not failed and buildargs_2 is not changed + when: docker_py_version is version('1.6.0', '>=') + +- assert: + that: + - buildargs_1 is failed + - buildargs_2 is failed + when: docker_py_version is version('1.6.0', '<') + +#################################################################### +## build.container_limits ########################################## +#################################################################### + +- name: container_limits (Failed due to min memory limit) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + container_limits: + memory: 4000 + pull: no + source: build + ignore_errors: yes + register: container_limits_1 + +- name: container_limits + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + container_limits: + memory: 7000000 + memswap: 8000000 + pull: no + source: build + register: container_limits_2 + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + # It *sometimes* happens that the first task does not fail. + # For now, we work around this by + # a) requiring that if it fails, the message must + # contain 'Minimum memory limit allowed is (4|6)MB', and + # b) requiring that either the first task, or the second + # task is changed, but not both. 
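+ # (Context: 'memory: 4000' is given in bytes, i.e. roughly 4 kB, + # which is far below the Docker daemon's minimum container memory + # limit of 4 MB (6 MB on some daemon versions), hence the expected + # failure of the first task on most setups.)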
+ - "not container_limits_1 is failed or ('Minimum memory limit allowed is ') in container_limits_1.msg" + - "container_limits_1 is changed or container_limits_2 is changed and not (container_limits_1 is changed and container_limits_2 is changed)" + +#################################################################### +## build.dockerfile ################################################ +#################################################################### + +- name: dockerfile + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + dockerfile: "MyDockerfile" + pull: no + source: build + register: dockerfile_1 + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + - dockerfile_1 is changed + - "('FROM ' ~ docker_test_image_alpine) in dockerfile_1.stdout" + - dockerfile_1['image']['Config']['WorkingDir'] == '/newdata' + +#################################################################### +## build.platform ################################################## +#################################################################### + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- name: build.platform + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + platform: linux + pull: no + source: build + register: platform_1 + ignore_errors: yes + +- name: build.platform (idempotency) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + platform: linux + pull: no + source: build + register: platform_2 + ignore_errors: yes + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + - platform_1 is changed + - platform_2 is not failed and platform_2 is not changed + when: docker_py_version is version('3.0.0', '>=') + +- assert: + that: + - platform_1 is failed + - platform_2 is failed + when: docker_py_version is version('3.0.0', '<') + +#################################################################### +## force ########################################################### +#################################################################### + +- name: Build an image + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + pull: no + source: build + +- name: force (changed) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + dockerfile: "MyDockerfile" + pull: no + source: build + force_source: yes + register: force_1 + +- name: force (unchanged) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + dockerfile: "MyDockerfile" + pull: no + source: build + force_source: yes + register: force_2 + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + - force_1 is changed + - force_2 is not changed + +#################################################################### +## load path ####################################################### +#################################################################### + +- name: Archive image + docker_image: + name: "{{ docker_test_image_hello_world }}" + archive_path: "{{ output_dir }}/image.tar" + source: pull + register: archive_image + +- name: Create invalid archive + copy: + dest: "{{ output_dir }}/image-invalid.tar" + content: "this is not a valid image" + +- name: remove image + docker_image: + name: "{{ docker_test_image_hello_world }}" + state: absent + force_absent: yes + +- 
name: load image (changed) + docker_image: + name: "{{ docker_test_image_hello_world }}" + load_path: "{{ output_dir }}/image.tar" + source: load + register: load_image + +- name: load image (idempotency) + docker_image: + name: "{{ docker_test_image_hello_world }}" + load_path: "{{ output_dir }}/image.tar" + source: load + register: load_image_1 + +- name: load image (wrong name) + docker_image: + name: foo:bar + load_path: "{{ output_dir }}/image.tar" + source: load + register: load_image_2 + ignore_errors: true + +- name: load image (invalid image) + docker_image: + name: foo:bar + load_path: "{{ output_dir }}/image-invalid.tar" + source: load + register: load_image_3 + ignore_errors: true + +- name: load image (invalid image, old API version) + docker_image: + name: foo:bar + load_path: "{{ output_dir }}/image-invalid.tar" + source: load + api_version: "1.22" + register: load_image_4 + +- assert: + that: + - load_image is changed + - archive_image['image']['Id'] == load_image['image']['Id'] + - load_image_1 is not changed + - load_image_2 is failed + - >- + "The archive did not contain image 'foo:bar'. Instead, found '" ~ docker_test_image_hello_world ~ "'." == load_image_2.msg + - load_image_3 is failed + - '"Detected no loaded images. Archive potentially corrupt?" == load_image_3.msg' + - load_image_4 is changed + - "'The API version of your Docker daemon is < 1.23, which does not return the image loading result from the Docker daemon. Therefore, we cannot verify whether the expected image was loaded, whether multiple images where loaded, or whether the load actually succeeded. You should consider upgrading your Docker daemon.' in load_image_4.warnings" + +#################################################################### +## build.path ###################################################### +#################################################################### + +- name: Build image + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + pull: no + source: build + register: path_1 + +- name: Build image (idempotency) + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + pull: no + source: build + register: path_2 + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + - path_1 is changed + - path_2 is not changed + +#################################################################### +## build.target #################################################### +#################################################################### + +- name: Build multi-stage image + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + dockerfile: "StagedDockerfile" + target: first + pull: no + source: build + register: dockerfile_2 + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + +- assert: + that: + - dockerfile_2 is changed + - dockerfile_2.image.Config.WorkingDir == '/first' + +#################################################################### +## build.etc_hosts ################################################# +#################################################################### + +- name: Build image with custom etc_hosts + docker_image: + name: "{{ iname }}" + build: + path: "{{ output_dir }}/files" + dockerfile: "EtcHostsDockerfile" + pull: no + etc_hosts: + some-custom-host: "127.0.0.1" + source: build + register: path_1 + +- name: cleanup + docker_image: + name: "{{ iname }}" + state: absent + force_absent: yes + 
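+# The EtcHostsDockerfile used above contains 'RUN ping -c1 some-custom-host', +# so this build only succeeds (and path_1 is changed) if the etc_hosts +# entry was actually injected into the build container.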
+- assert: + that: + - path_1 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile new file mode 100644 index 00000000..c5032944 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile @@ -0,0 +1,3 @@ +FROM {{ docker_test_image_busybox }} +ENV foo /bar +WORKDIR ${foo} diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile new file mode 100644 index 00000000..f1b54e3b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile @@ -0,0 +1,3 @@ +FROM {{ docker_test_image_busybox }} +# This should fail building if docker cannot resolve some-custom-host +RUN ping -c1 some-custom-host diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile new file mode 100644 index 00000000..68bca8a2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile @@ -0,0 +1,5 @@ +FROM {{ docker_test_image_alpine }} +ENV INSTALL_PATH /newdata +RUN mkdir -p $INSTALL_PATH + +WORKDIR $INSTALL_PATH diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile new file mode 100644 index 00000000..fc4623a3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile @@ -0,0 +1,7 @@ +FROM {{ docker_test_image_busybox }} AS first +ENV dir /first +WORKDIR ${dir} + +FROM {{ docker_test_image_busybox }} AS second +ENV dir /second +WORKDIR ${dir} diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml new file mode 100644 index 00000000..8b510c9c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml @@ -0,0 +1,59 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- block: + - name: Make sure image is not there + docker_image: + name: "{{ docker_test_image_alpine_different }}" + state: absent + + - name: Inspect a non-available image + docker_image_info: + name: "{{ docker_test_image_alpine_different }}" + register: result + + - assert: + that: + - "result.images|length == 0" + + - name: Make sure images are there + docker_image: + name: "{{ item }}" + source: pull + state: present + loop: + - "{{ docker_test_image_hello_world }}" + - "{{ docker_test_image_alpine }}" + + - name: Inspect an available image + docker_image_info: + name: "{{ docker_test_image_hello_world }}" + register: result + + - assert: + that: + - "result.images|length == 1" + - "docker_test_image_hello_world in result.images[0].RepoTags" + + - name: Inspect multiple images + docker_image_info: + name: + - "{{ docker_test_image_hello_world }}" + - "{{ docker_test_image_alpine }}" + register: result + + - debug: var=result + + - assert: + that: + - "result.images|length == 2" + - "docker_test_image_hello_world in result.images[0].RepoTags" + - "docker_test_image_alpine in result.images[1].RepoTags" + + when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_image_info tests!" 
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml new file mode 100644 index 00000000..21d7a58f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker_registry diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml new file mode 100644 index 00000000..115c31e6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml @@ -0,0 +1,9 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] + include_tasks: + file: test.yml diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml new file mode 100644 index 00000000..a2999370 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml @@ -0,0 +1,3 @@ +--- +- name: "Loading tasks from {{ item }}" + include_tasks: "{{ item }}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml new file mode 100644 index 00000000..5a6f15fa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml @@ -0,0 +1,9 @@ +--- +- block: + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" + when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_login tests!" 
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml new file mode 100644 index 00000000..1c584c0f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml @@ -0,0 +1,139 @@ +--- +- block: + - name: Log in with wrong password (check mode) + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: "1234" + state: present + register: login_failed_check + ignore_errors: yes + check_mode: yes + + - name: Log in with wrong password + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: "1234" + state: present + register: login_failed + ignore_errors: yes + + - name: Make sure that login failed + assert: + that: + - login_failed_check is failed + - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg" + - login_failed is failed + - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg" + + - name: Log in (check mode) + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: present + register: login_1 + check_mode: yes + + - name: Log in + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: present + register: login_2 + + - name: Get permissions of ~/.docker/config.json + stat: + path: ~/.docker/config.json + register: login_2_stat + + - name: Log in (idempotent) + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: present + register: login_3 + + - name: Log in (idempotent, check mode) + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: present + register: login_4 + check_mode: yes + + - name: Make sure that login worked + assert: + that: + - login_1 is changed + - login_2 is changed + - login_3 is not changed + - login_4 is not changed + - login_2_stat.stat.mode == '0600' + + - name: Log in again with wrong password (check mode) + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: "1234" + state: present + register: login_failed_check + ignore_errors: yes + check_mode: yes + + - name: Log in again with wrong password + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: "1234" + state: present + register: login_failed + ignore_errors: yes + + - name: Make sure that login failed again + assert: + that: + - login_failed_check is failed + - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg" + - login_failed is failed + - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg" + + - name: Log out (check mode) + docker_login: + registry_url: "{{ registry_frontend_address }}" + state: absent + register: logout_1 + check_mode: yes + + - name: Log out + docker_login: + registry_url: "{{ registry_frontend_address }}" + 
state: absent + register: logout_2 + + - name: Log out (idempotent) + docker_login: + registry_url: "{{ registry_frontend_address }}" + state: absent + register: logout_3 + + - name: Log out (idempotent, check mode) + docker_login: + registry_url: "{{ registry_frontend_address }}" + state: absent + register: logout_4 + check_mode: yes + + - name: Make sure that logout worked + assert: + that: + - logout_1 is changed + - logout_2 is changed + - logout_3 is not changed + - logout_4 is not changed + + when: registry_frontend_address != 'n/a' diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml new file mode 100644 index 00000000..2ad0b66f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml @@ -0,0 +1,57 @@ +--- +- block: + - name: Log out server 1 + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: absent + + - name: Log out server 2 + docker_login: + registry_url: "{{ registry_frontend2_address }}" + username: testuser + password: hunter2 + state: absent + + - name: Log in server 1 + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: present + register: login_1 + + - name: Log in server 2 + docker_login: + registry_url: "{{ registry_frontend2_address }}" + username: testuser + password: hunter2 + state: present + register: login_2 + + - name: Log in server 1 (idempotent) + docker_login: + registry_url: "{{ registry_frontend_address }}" + username: testuser + password: hunter2 + state: present + register: login_1_2 + + - name: Log in server 2 (idempotent) + docker_login: + registry_url: "{{ registry_frontend2_address }}" + username: testuser + password: hunter2 + state: present + register: login_2_2 + + - name: Make sure that login worked + assert: + that: + - login_1 is changed + - login_2 is changed + - login_1_2 is not changed + - login_2_2 is not changed + + when: registry_frontend_address != 'n/a' and registry_frontend2_address != 'n/a' diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml new file 
mode 100644 index 00000000..d5fcdb91 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml @@ -0,0 +1,38 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Create random name prefix + set_fact: + name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + cnames: [] + dnetworks: [] + +- debug: + msg: "Using name prefix {{ name_prefix }}" + +- block: + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" + + always: + - name: "Make sure all containers are removed" + docker_container: + name: "{{ item }}" + state: absent + force_kill: yes + loop: "{{ cnames }}" + - name: "Make sure all networks are removed" + docker_network: + name: "{{ item }}" + state: absent + force: yes + loop: "{{ dnetworks }}" + + when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=') # FIXME: find out API version! + +- fail: msg="Too old docker / docker-py version to run docker_network tests!" + when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml new file mode 100644 index 00000000..a2999370 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml @@ -0,0 +1,3 @@ +--- +- name: "Loading tasks from {{ item }}" + include_tasks: "{{ item }}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml new file mode 100644 index 00000000..e6290715 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml @@ -0,0 +1,134 @@ +--- +- name: Registering container and network names + set_fact: + cname_1: "{{ name_prefix ~ '-container-1' }}" + cname_2: "{{ name_prefix ~ '-container-2' }}" + cname_3: "{{ name_prefix ~ '-container-3' }}" + nname_1: "{{ name_prefix ~ '-network-1' }}" + nname_2: "{{ name_prefix ~ '-network-2' }}" +- name: Registering container and network names + set_fact: + cnames: "{{ cnames + [cname_1, cname_2, cname_3] }}" + dnetworks: "{{ dnetworks + [nname_1, nname_2] }}" + +- name: Create containers + docker_container: + name: "{{ container_name }}" + image: "{{ docker_test_image_alpine }}" + command: /bin/sleep 10m + state: started + loop: + - "{{ cname_1 }}" + - "{{ cname_2 }}" + - "{{ cname_3 }}" + loop_control: + loop_var: container_name + +#################################################################### + +- name: Create network + docker_network: + name: "{{ nname_1 }}" + state: present + register: networks_1 + +- name: Connect network to containers 1 + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_1 }}" + register: 
networks_2 + +- name: Connect network to containers 1 (idempotency) + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_1 }}" + register: networks_2_idem + +- name: Connect network to containers 1 and 2 + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_1 }}" + - "{{ cname_2 }}" + register: networks_3 + +- name: Connect network to containers 1 and 2 (idempotency) + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_1 }}" + - "{{ cname_2 }}" + register: networks_3_idem + +- name: Connect network to container 3 + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_3 }}" + appends: yes + register: networks_4 + +- name: Connect network to container 3 (idempotency) + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_3 }}" + appends: yes + register: networks_4_idem + +- name: Disconnect network from container 1 + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_2 }}" + - "{{ cname_3 }}" + register: networks_5 + +- name: Disconnect network from container 1 (idempotency) + docker_network: + name: "{{ nname_1 }}" + state: present + connected: + - "{{ cname_2 }}" + - "{{ cname_3 }}" + register: networks_5_idem + +- name: Cleanup + docker_network: + name: "{{ nname_1 }}" + state: absent + +- assert: + that: + - networks_1 is changed + - networks_2 is changed + - networks_2_idem is not changed + - networks_3 is changed + - networks_3_idem is not changed + - networks_4 is changed + - networks_4_idem is not changed + - networks_5 is changed + - networks_5_idem is not changed + +#################################################################### + +- name: Delete containers + docker_container: + name: "{{ container_name }}" + state: absent + force_kill: yes + loop: + - "{{ cname_1 }}" + - "{{ cname_2 }}" + - "{{ cname_3 }}" + loop_control: + loop_var: container_name diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml new file mode 100644 index 00000000..214036c3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml @@ -0,0 +1,312 @@ +--- +- name: Registering network names + set_fact: + nname_ipam_0: "{{ name_prefix ~ '-network-ipam-0' }}" + nname_ipam_1: "{{ name_prefix ~ '-network-ipam-1' }}" + nname_ipam_2: "{{ name_prefix ~ '-network-ipam-2' }}" + nname_ipam_3: "{{ name_prefix ~ '-network-ipam-3' }}" + +- name: Registering network names + set_fact: + dnetworks: "{{ dnetworks + [nname_ipam_0, nname_ipam_1, nname_ipam_2, nname_ipam_3] }}" + + +#################### IPv4 IPAM config #################### + +- name: Create network with custom IPAM config + docker_network: + name: "{{ nname_ipam_1 }}" + ipam_config: + - subnet: 172.3.27.0/24 + gateway: 172.3.27.2 + iprange: 172.3.27.0/26 + aux_addresses: + host1: 172.3.27.3 + host2: 172.3.27.4 + register: network + +- assert: + that: + - network is changed + +- name: Create network with custom IPAM config (idempotence) + docker_network: + name: "{{ nname_ipam_1 }}" + ipam_config: + - subnet: 172.3.27.0/24 + gateway: 172.3.27.2 + iprange: 172.3.27.0/26 + aux_addresses: + host1: 172.3.27.3 + host2: 172.3.27.4 + register: network + +- assert: + that: + - network 
is not changed + +- name: Change of network created with custom IPAM config + docker_network: + name: "{{ nname_ipam_1 }}" + ipam_config: + - subnet: 172.3.28.0/24 + gateway: 172.3.28.2 + iprange: 172.3.28.0/26 + aux_addresses: + host1: 172.3.28.3 + register: network + diff: yes + +- assert: + that: + - network is changed + - network.diff.differences | length == 4 + - '"ipam_config[0].subnet" in network.diff.differences' + - '"ipam_config[0].gateway" in network.diff.differences' + - '"ipam_config[0].iprange" in network.diff.differences' + - '"ipam_config[0].aux_addresses" in network.diff.differences' + +- name: Remove gateway and iprange of network with custom IPAM config + docker_network: + name: "{{ nname_ipam_1 }}" + ipam_config: + - subnet: 172.3.28.0/24 + register: network + +- assert: + that: + - network is not changed + +- name: Cleanup network with custom IPAM config + docker_network: + name: "{{ nname_ipam_1 }}" + state: absent + + +#################### IPv6 IPAM config #################### + +- name: Create network with IPv6 IPAM config + docker_network: + name: "{{ nname_ipam_2 }}" + enable_ipv6: yes + ipam_config: + - subnet: fdd1:ac8c:0557:7ce0::/64 + register: network + +- assert: + that: + - network is changed + +- name: Create network with IPv6 IPAM config (idempotence) + docker_network: + name: "{{ nname_ipam_2 }}" + enable_ipv6: yes + ipam_config: + - subnet: fdd1:ac8c:0557:7ce0::/64 + register: network + +- assert: + that: + - network is not changed + +- name: Change subnet of network with IPv6 IPAM config + docker_network: + name: "{{ nname_ipam_2 }}" + enable_ipv6: yes + ipam_config: + - subnet: fdd1:ac8c:0557:7ce1::/64 + register: network + diff: yes + +- assert: + that: + - network is changed + - network.diff.differences | length == 1 + - network.diff.differences[0] == "ipam_config[0].subnet" + +- name: Change subnet of network with IPv6 IPAM config (invalid CIDR) + docker_network: + name: "{{ nname_ipam_2 }}" + enable_ipv6: yes + ipam_config: + - subnet: "fdd1:ac8c:0557:7ce1::" + register: network + ignore_errors: yes + +- assert: + that: + - network is failed + - "network.msg == '\"fdd1:ac8c:0557:7ce1::\" is not a valid CIDR'" + +- name: Cleanup network with IPv6 IPAM config + docker_network: + name: "{{ nname_ipam_2 }}" + state: absent + + +#################### IPv4 and IPv6 network #################### + +- name: Create network with IPv6 and custom IPv4 IPAM config + docker_network: + name: "{{ nname_ipam_3 }}" + enable_ipv6: yes + ipam_config: + - subnet: 172.4.27.0/24 + - subnet: fdd1:ac8c:0557:7ce2::/64 + register: network + +- assert: + that: + - network is changed + +- name: Change subnet order of network with IPv6 and custom IPv4 IPAM config (idempotence) + docker_network: + name: "{{ nname_ipam_3 }}" + enable_ipv6: yes + ipam_config: + - subnet: fdd1:ac8c:0557:7ce2::/64 + - subnet: 172.4.27.0/24 + register: network + +- assert: + that: + - network is not changed + +- name: Remove IPv6 from network with custom IPv4 and IPv6 IPAM config (change) + docker_network: + name: "{{ nname_ipam_3 }}" + enable_ipv6: no + ipam_config: + - subnet: 172.4.27.0/24 + register: network + diff: yes + +- assert: + that: + - network is changed + - network.diff.differences | length == 1 + - network.diff.differences[0] == "enable_ipv6" + +- name: Cleanup network with IPv6 and custom IPv4 IPAM config + docker_network: + name: "{{ nname_ipam_3 }}" + state: absent + + +#################### multiple IPv4 networks #################### + +- block: + - name: Create network with two IPv4 IPAM configs + 
docker_network: + name: "{{ nname_ipam_3 }}" + driver: "macvlan" + driver_options: + parent: "{{ ansible_default_ipv4.alias }}" + ipam_config: + - subnet: 172.4.27.0/24 + - subnet: 172.4.28.0/24 + register: network + + - assert: + that: + - network is changed + + - name: Create network with two IPv4 IPAM configs (idempotence) + docker_network: + name: "{{ nname_ipam_3 }}" + driver: "macvlan" + driver_options: + parent: "{{ ansible_default_ipv4.alias }}" + ipam_config: + - subnet: 172.4.28.0/24 + - subnet: 172.4.27.0/24 + register: network + + - assert: + that: + - network is not changed + + - name: Create network with two IPv4 IPAM configs (change) + docker_network: + name: "{{ nname_ipam_3 }}" + driver: "macvlan" + driver_options: + parent: "{{ ansible_default_ipv4.alias }}" + ipam_config: + - subnet: 172.4.27.0/24 + - subnet: 172.4.29.0/24 + register: network + diff: yes + + - assert: + that: + - network is changed + - network.diff.differences | length == 1 + + - name: Create network with one IPv4 IPAM config (no change) + docker_network: + name: "{{ nname_ipam_3 }}" + driver: "macvlan" + driver_options: + parent: "{{ ansible_default_ipv4.alias }}" + ipam_config: + - subnet: 172.4.29.0/24 + register: network + + - assert: + that: + - network is not changed + + - name: Cleanup network + docker_network: + name: "{{ nname_ipam_3 }}" + state: absent + + when: ansible_facts.virtualization_type != 'docker' + + +#################### IPAM driver options #################### + +- name: Create network with IPAM driver options + docker_network: + name: "{{ nname_ipam_3 }}" + ipam_driver: default + ipam_driver_options: + a: b + register: network_1 + ignore_errors: yes +- name: Create network with IPAM driver options (idempotence) + docker_network: + name: "{{ nname_ipam_3 }}" + ipam_driver: default + ipam_driver_options: + a: b + diff: yes + register: network_2 + ignore_errors: yes +- name: Create network with IPAM driver options (change) + docker_network: + name: "{{ nname_ipam_3 }}" + ipam_driver: default + ipam_driver_options: + a: c + diff: yes + register: network_3 + ignore_errors: yes +- name: Cleanup network + docker_network: + name: "{{ nname_ipam_3 }}" + state: absent + +- assert: + that: + - network_1 is changed + - network_2 is not changed + - network_3 is changed + when: docker_py_version is version('2.0.0', '>=') +- assert: + that: + - network_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in network_1.msg" + - "'Minimum version required is 2.0.0 ' in network_1.msg" + when: docker_py_version is version('2.0.0', '<') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml new file mode 100644 index 00000000..ea073db3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml @@ -0,0 +1,240 @@ +--- +- name: Registering network name + set_fact: + nname_1: "{{ name_prefix ~ '-network-1' }}" +- name: Registering network name + set_fact: + dnetworks: "{{ dnetworks + [nname_1] }}" + +#################################################################### +## internal ######################################################## +#################################################################### + +- name: internal + docker_network: + name: "{{ nname_1 }}" + internal: yes + register: 
internal_1 + +- name: internal (idempotency) + docker_network: + name: "{{ nname_1 }}" + internal: yes + register: internal_2 + +- name: internal (change) + docker_network: + name: "{{ nname_1 }}" + internal: no + register: internal_3 + +- name: cleanup + docker_network: + name: "{{ nname_1 }}" + state: absent + force: yes + +- assert: + that: + - internal_1 is changed + - internal_2 is not changed + - internal_3 is changed + +#################################################################### +## driver_options ################################################## +#################################################################### + +- name: driver_options + docker_network: + name: "{{ nname_1 }}" + driver_options: + com.docker.network.bridge.enable_icc: 'false' + register: driver_options_1 + +- name: driver_options (idempotency) + docker_network: + name: "{{ nname_1 }}" + driver_options: + com.docker.network.bridge.enable_icc: 'false' + register: driver_options_2 + +- name: driver_options (idempotency with string translation) + docker_network: + name: "{{ nname_1 }}" + driver_options: + com.docker.network.bridge.enable_icc: False + register: driver_options_3 + +- name: driver_options (change) + docker_network: + name: "{{ nname_1 }}" + driver_options: + com.docker.network.bridge.enable_icc: 'true' + register: driver_options_4 + +- name: driver_options (idempotency with string translation) + docker_network: + name: "{{ nname_1 }}" + driver_options: + com.docker.network.bridge.enable_icc: True + register: driver_options_5 + +- name: cleanup + docker_network: + name: "{{ nname_1 }}" + state: absent + force: yes + +- assert: + that: + - driver_options_1 is changed + - driver_options_2 is not changed + - driver_options_3 is not changed + - driver_options_4 is changed + - driver_options_5 is not changed + +#################################################################### +## scope ########################################################### +#################################################################### + +- block: + - name: scope + docker_network: + name: "{{ nname_1 }}" + driver: bridge + scope: local + register: scope_1 + + - name: scope (idempotency) + docker_network: + name: "{{ nname_1 }}" + driver: bridge + scope: local + register: scope_2 + + - name: swarm + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + + # Driver change alongside scope is intentional - bridge doesn't appear to support anything but local, and overlay can't downgrade to local. Additionally, overlay reports as swarm for swarm OR global, so no change is reported in that case. + # Test output indicates that the scope is altered, at least, so manual inspection will be required to verify this going forward, unless we come up with a test driver that supports multiple scopes. 
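+  # For reference, a manual scope check could look like the following sketch (illustrative only, not part of the test matrix; the task name and the network_inspect register are hypothetical, and it assumes the docker CLI is available on the host, as in the docker_network_info comparison task): +  # - name: Inspect network scope manually +  #   command: docker network inspect "{{ nname_1 }}" +  #   register: network_inspect +  # - debug: msg="{{ (network_inspect.stdout | from_json)[0].Scope }}"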
+ - name: scope (change) + docker_network: + name: "{{ nname_1 }}" + driver: overlay + scope: swarm + register: scope_3 + + - name: cleanup network + docker_network: + name: "{{ nname_1 }}" + state: absent + force: yes + + - assert: + that: + - scope_1 is changed + - scope_2 is not changed + - scope_3 is changed + + always: + - name: cleanup swarm + docker_swarm: + state: absent + force: yes + + # Requirements for docker_swarm + when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.25', '>=') + +#################################################################### +## attachable ###################################################### +#################################################################### + +- name: attachable + docker_network: + name: "{{ nname_1 }}" + attachable: true + register: attachable_1 + ignore_errors: yes + +- name: attachable (idempotency) + docker_network: + name: "{{ nname_1 }}" + attachable: true + register: attachable_2 + ignore_errors: yes + +- name: attachable (change) + docker_network: + name: "{{ nname_1 }}" + attachable: false + register: attachable_3 + ignore_errors: yes + +- name: cleanup + docker_network: + name: "{{ nname_1 }}" + state: absent + force: yes + +- assert: + that: + - attachable_1 is changed + - attachable_2 is not changed + - attachable_3 is changed + when: docker_py_version is version('2.0.0', '>=') +- assert: + that: + - attachable_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in attachable_1.msg" + - "'Minimum version required is 2.0.0 ' in attachable_1.msg" + when: docker_py_version is version('2.0.0', '<') + +#################################################################### +## labels ########################################################## +#################################################################### + +- name: labels + docker_network: + name: "{{ nname_1 }}" + labels: + ansible.test.1: hello + ansible.test.2: world + register: labels_1 + +- name: labels (idempotency) + docker_network: + name: "{{ nname_1 }}" + labels: + ansible.test.2: world + ansible.test.1: hello + register: labels_2 + +- name: labels (less labels) + docker_network: + name: "{{ nname_1 }}" + labels: + ansible.test.1: hello + register: labels_3 + +- name: labels (more labels) + docker_network: + name: "{{ nname_1 }}" + labels: + ansible.test.1: hello + ansible.test.3: ansible + register: labels_4 + +- name: cleanup + docker_network: + name: "{{ nname_1 }}" + state: absent + force: yes + +- assert: + that: + - labels_1 is changed + - labels_2 is not changed + - labels_3 is not changed + - labels_4 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml new file mode 100644 index 00000000..832836aa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml @@ -0,0 +1,61 @@ +--- +- name: Registering network name + set_fact: + nname_1: "{{ name_prefix ~ '-network-1' }}" +- name: Registering network name + set_fact: + dnetworks: "{{ dnetworks + [nname_1] }}" + +#################################################################### +## overlay ######################################################### +#################################################################### + +- block: + # Overlay networks 
require swarm initialization before they'll work + - name: swarm + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + + - name: overlay + docker_network: + name: "{{ nname_1 }}" + driver: overlay + driver_options: + com.docker.network.driver.overlay.vxlanid_list: "257" + register: overlay_1 + + - name: overlay (idempotency) + docker_network: + name: "{{ nname_1 }}" + driver: overlay + driver_options: + com.docker.network.driver.overlay.vxlanid_list: "257" + register: overlay_2 + + - name: overlay (change) + docker_network: + name: "{{ nname_1 }}" + driver: bridge + register: overlay_3 + + - name: cleanup network + docker_network: + name: "{{ nname_1 }}" + state: absent + force: yes + + - assert: + that: + - overlay_1 is changed + - overlay_2 is not changed + - overlay_3 is changed + + always: + - name: cleanup swarm + docker_swarm: + state: absent + force: yes + + # Requirements for docker_swarm + when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.25', '>=') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml new file mode 100644 index 00000000..bdee190e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml @@ -0,0 +1,37 @@ +--- +- name: Registering container and network names + set_fact: + nname_1: "{{ name_prefix ~ '-network-foo' }}" + nname_2: "{{ name_prefix ~ '-network-foobar' }}" +- name: Registering container and network names + set_fact: + dnetworks: "{{ dnetworks + [nname_1, nname_2] }}" + +#################################################################### + +- name: Create network (superstring) + docker_network: + name: "{{ nname_2 }}" + state: present + register: networks_1 + +- name: Create network (substring) + docker_network: + name: "{{ nname_1 }}" + state: present + register: networks_2 + +- name: Cleanup + docker_network: + name: "{{ network_name }}" + state: absent + loop: + - "{{ nname_1 }}" + - "{{ nname_2 }}" + loop_control: + loop_var: network_name + +- assert: + that: + - networks_1 is changed + - networks_2 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml new file mode 100644 index 00000000..70edcf6c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml @@ -0,0 +1,76 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- block: + - name: Create random network name + set_fact: + nname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + + - name: Make sure network is not there + docker_network: + name: "{{ nname }}" + state: absent + force: yes + + - name: Inspect a non-present network + docker_network_info: + name: "{{ nname }}" + register: result + + - assert: + that: + - "not result.exists" + - "'network' in result" + - "result.network is none" + + - name: Make sure network exists + docker_network: + name: "{{ nname }}" + state: present + + - name: Inspect a present network + docker_network_info: + name: "{{ nname }}" + register: result + - name: Dump docker_network_info result + debug: var=result + + - name: "Comparison: use 'docker network inspect'" + command: docker network inspect "{{ nname }}" + register: docker_inspect + ignore_errors: yes + - block: + - set_fact: + docker_inspect_result: "{{ docker_inspect.stdout | from_json }}" + - name: Dump docker inspect result + debug: var=docker_inspect_result + when: docker_inspect is not failed + + - name: Cleanup + docker_network: + name: "{{ nname }}" + state: absent + force: yes + + - assert: + that: + - result.exists + - "'network' in result" + - "result.network" + + - assert: + that: + - "result.network == docker_inspect_result[0]" + when: docker_inspect is not failed + - assert: + that: + - "'is too new. Maximum supported API version is' in docker_inspect.stderr" + when: docker_inspect is failed + + when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.21', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_network_info tests!" 
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.21', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/aliases new file mode 100644 index 00000000..f225f586 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +destructive +needs/root diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml new file mode 100644 index 00000000..a91efc59 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml @@ -0,0 +1,37 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Run the tests +- block: + - include_tasks: test_node.yml + + always: + - name: Cleanup (trying) + docker_swarm: + state: absent + force: true + diff: no + ignore_errors: yes + + - name: Restart docker daemon + service: + name: docker + state: restarted + become: yes + - name: Wait for docker daemon to be fully restarted + command: docker ps + ignore_errors: yes + + - name: Cleanup + docker_swarm: + state: absent + force: true + diff: no + + when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_node tests!" 
+ when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml new file mode 100644 index 00000000..bdc01afb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml @@ -0,0 +1,840 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Try to get docker_node_info when docker is not running in swarm mode + docker_node_info: + ignore_errors: yes + register: output + + - name: assert failure when swarm is not in use or not run on a manager node + assert: + that: + - 'output is failed' + - 'output.msg == "Error running docker swarm module: must run on swarm manager node"' + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + register: output + + - name: assert changed when creating a new swarm cluster + assert: + that: + - 'output is changed' + - 'output.actions[0] | regex_search("New Swarm cluster created: ")' + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + + - name: Try to get docker_node_info when docker is running in swarm mode and as manager + docker_node_info: + register: output + + - name: assert reading docker swarm node facts + assert: + that: + - 'output.nodes | length > 0' + - 'output.nodes[0].ID is string' + + - name: Register node ID + set_fact: + nodeid: "{{ output.nodes[0].ID }}" + +#################################################################### +## Set node as swarm manager ####################################### +#################################################################### + + - name: Try to set node as manager (check) + docker_node: + hostname: "{{ nodeid }}" + role: manager + check_mode: yes + register: set_as_manager_1 + + - name: Try to set node as manager + docker_node: + hostname: "{{ nodeid }}" + role: manager + register: set_as_manager_2 + + - name: Try to set node as manager (idempotent) + docker_node: + hostname: "{{ nodeid }}" + role: manager + register: set_as_manager_3 + + - name: Try to set node as manager (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + role: manager + check_mode: yes + register: set_as_manager_4 + + - name: assert that node role has not changed + assert: + that: + - 'set_as_manager_1 is not changed' + - 'set_as_manager_2 is not changed' + - 'set_as_manager_3 is not changed' + - 'set_as_manager_4 is not changed' + - 'set_as_manager_1.node.Spec.Role == "manager"' + - 'set_as_manager_2.node.Spec.Role == "manager"' + - 'set_as_manager_3.node.Spec.Role == "manager"' + - 'set_as_manager_4.node.Spec.Role == "manager"' + +#################################################################### +## Set node as swarm worker ######################################## +#################################################################### + + - name: Try to set node as worker (check) + docker_node: + hostname: "{{ nodeid }}" + role: worker + check_mode: yes + register: set_as_worker_1 + + - name: Try to set node as worker + docker_node: + hostname: "{{ nodeid
}}" + role: worker + ignore_errors: yes + register: set_as_worker_2 + + - name: assert that node cannot change role to worker + assert: + that: + - 'set_as_worker_1 is changed' + - 'set_as_worker_2 is failed' + - 'set_as_worker_2.msg | regex_search("attempting to demote the last manager of the swarm")' + +#################################################################### +## Set node as pasued ############################################## +#################################################################### + + - name: Try to set node availability as paused (check) + docker_node: + hostname: "{{ nodeid }}" + availability: pause + check_mode: yes + register: set_as_paused_1 + + - name: Try to set node availability as paused + docker_node: + hostname: "{{ nodeid }}" + availability: pause + register: set_as_paused_2 + + - name: Try to set node availability as paused (idempotent) + docker_node: + hostname: "{{ nodeid }}" + availability: pause + register: set_as_paused_3 + + - name: Try to set node availability as paused (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + availability: pause + check_mode: yes + register: set_as_paused_4 + + - name: assert node changed availability to paused + assert: + that: + - 'set_as_paused_1 is changed' + - 'set_as_paused_2 is changed' + - 'set_as_paused_3 is not changed' + - 'set_as_paused_4 is not changed' + - 'set_as_paused_2.node.Spec.Availability == "pause"' + +#################################################################### +## Set node as drained ############################################# +#################################################################### + + - name: Try to set node availability as drained (check) + docker_node: + hostname: "{{ nodeid }}" + availability: drain + check_mode: yes + register: output_drain_1 + + - name: Try to set node availability as drained + docker_node: + hostname: "{{ nodeid }}" + availability: drain + register: output_drain_2 + + - name: Try to set node availability as drained (idempotent) + docker_node: + hostname: "{{ nodeid }}" + availability: drain + register: output_drain_3 + + - name: Try to set node availability as drained (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + availability: drain + check_mode: yes + register: output_drain_4 + + - name: assert node changed availability to drained + assert: + that: + - 'output_drain_1 is changed' + - 'output_drain_2 is changed' + - 'output_drain_3 is not changed' + - 'output_drain_4 is not changed' + - 'output_drain_2.node.Spec.Availability == "drain"' + + +#################################################################### +## Set node as active ############################################## +#################################################################### + + - name: Try to set node availability as active (check) + docker_node: + hostname: "{{ nodeid }}" + availability: active + check_mode: yes + register: output_active_1 + + - name: Try to set node availability as active + docker_node: + hostname: "{{ nodeid }}" + availability: active + register: output_active_2 + + - name: Try to set node availability as active (idempotent) + docker_node: + hostname: "{{ nodeid }}" + availability: active + register: output_active_3 + + - name: Try to set node availability as active (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + availability: active + check_mode: yes + register: output_active_4 + + - name: assert node changed availability to active + assert: + that: + - 'output_active_1 is changed' + - 'output_active_2 is 
changed' + - 'output_active_3 is not changed' + - 'output_active_4 is not changed' + - 'output_active_2.node.Spec.Availability == "active"' + +#################################################################### +## Add single label ############################################### +#################################################################### + + - name: Try to add single label to swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1 + check_mode: yes + register: output_add_single_label_1 + + - name: Try to add single label to swarm node + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1 + register: output_add_single_label_2 + + - name: Try to add single label to swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1 + register: output_add_single_label_3 + + - name: Try to add single label to swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1 + check_mode: yes + register: output_add_single_label_4 + + - name: assert adding single label to swarm node + assert: + that: + - 'output_add_single_label_1 is changed' + - 'output_add_single_label_2 is changed' + - 'output_add_single_label_3 is not changed' + - 'output_add_single_label_4 is not changed' + - 'output_add_single_label_2.node.Spec.Labels | length == 1' + - 'output_add_single_label_2.node.Spec.Labels.label1 == "value1"' + +#################################################################### +## Add multiple labels ############################################# +#################################################################### + + - name: Try to add five labels to swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2 + label3: value3 + label4: value4 + label5: value5 + label6: value6 + check_mode: yes + register: output_add_multiple_labels_1 + + - name: Try to add five labels to swarm node + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2 + label3: value3 + label4: value4 + label5: value5 + label6: value6 + register: output_add_multiple_labels_2 + + - name: Try to add five labels to swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2 + label3: value3 + label4: value4 + label5: value5 + label6: value6 + register: output_add_multiple_labels_3 + + - name: Try to add five labels to swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2 + label3: value3 + label4: value4 + label5: value5 + label6: value6 + check_mode: yes + register: output_add_multiple_labels_4 + + - name: assert adding multiple labels to swarm node + assert: + that: + - 'output_add_multiple_labels_1 is changed' + - 'output_add_multiple_labels_2 is changed' + - 'output_add_multiple_labels_3 is not changed' + - 'output_add_multiple_labels_4 is not changed' + - 'output_add_multiple_labels_2.node.Spec.Labels | length == 6' + - 'output_add_multiple_labels_2.node.Spec.Labels.label1 == "value1"' + - 'output_add_multiple_labels_2.node.Spec.Labels.label6 == "value6"' + +#################################################################### +## Update label value ############################################## +#################################################################### + + - name: Update value of existing label (check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1111 + check_mode: yes + register: output_update_label_1 + + - name: Update value of existing label + docker_node: + 
hostname: "{{ nodeid }}" + labels: + label1: value1111 + register: output_update_label_2 + + - name: Update value of existing label (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1111 + register: output_update_label_3 + + - name: Update value of existing label (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label1: value1111 + check_mode: yes + register: output_update_label_4 + + - name: assert updating single label assigned to swarm node + assert: + that: + - 'output_update_label_1 is changed' + - 'output_update_label_2 is changed' + - 'output_update_label_3 is not changed' + - 'output_update_label_4 is not changed' + - 'output_update_label_2.node.Spec.Labels | length == 6' + - 'output_update_label_2.node.Spec.Labels.label1 == "value1111"' + - 'output_update_label_2.node.Spec.Labels.label5 == "value5"' + +#################################################################### +## Update multiple labels values ################################### +#################################################################### + + - name: Update value of multiple existing label (check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2222 + label3: value3333 + check_mode: yes + register: output_update_labels_1 + + - name: Update value of multiple existing label + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2222 + label3: value3333 + register: output_update_labels_2 + + - name: Update value of multiple existing label (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2222 + label3: value3333 + register: output_update_labels_3 + + - name: Update value of multiple existing label (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label2: value2222 + label3: value3333 + check_mode: yes + register: output_update_labels_4 + + - name: assert updating multiple labels assigned to swarm node + assert: + that: + - 'output_update_labels_1 is changed' + - 'output_update_labels_2 is changed' + - 'output_update_labels_3 is not changed' + - 'output_update_labels_4 is not changed' + - 'output_update_labels_2.node.Spec.Labels | length == 6' + - 'output_update_labels_2.node.Spec.Labels.label1 == "value1111"' + - 'output_update_labels_2.node.Spec.Labels.label3 == "value3333"' + - 'output_update_labels_2.node.Spec.Labels.label5 == "value5"' + +#################################################################### +## Remove single label ############################################# +#################################################################### + + - name: Try to remove single existing label from swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label1 + check_mode: yes + register: output_remove_label_1 + + - name: Try to remove single existing label from swarm node + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label1 + register: output_remove_label_2 + + - name: Try to remove single existing label from swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label1 + register: output_remove_label_3 + + - name: Try to remove single existing label from swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label1 + check_mode: yes + register: output_remove_label_4 + + - name: assert removing single label from swarm node + assert: + that: + - 'output_remove_label_1 is changed' + - 'output_remove_label_2 is changed' + - 'output_remove_label_3 is not 
changed' + - 'output_remove_label_4 is not changed' + - 'output_remove_label_2.node.Spec.Labels | length == 5' + - '"label1" not in output_remove_label_2.node.Spec.Labels' + - 'output_remove_label_2.node.Spec.Labels.label3 == "value3333"' + - 'output_remove_label_2.node.Spec.Labels.label5 == "value5"' + + +#################################################################### +## Remove single label not assigned to swarm node ################## +#################################################################### + + - name: Try to remove single non-existing label from swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - labelnotexist + check_mode: yes + register: output_remove_nonexist_label_1 + + - name: Try to remove single non-existing label from swarm node + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - labelnotexist + register: output_remove_nonexist_label_2 + + - name: Try to remove single non-existing label from swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - labelnotexist + register: output_remove_nonexist_label_3 + + - name: Try to remove single non-existing label from swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - labelnotexist + check_mode: yes + register: output_remove_nonexist_label_4 + + - name: assert removing single non-existing label from swarm node + assert: + that: + - 'output_remove_nonexist_label_1 is not changed' + - 'output_remove_nonexist_label_2 is not changed' + - 'output_remove_nonexist_label_3 is not changed' + - 'output_remove_nonexist_label_4 is not changed' + - 'output_remove_nonexist_label_2.node.Spec.Labels | length == 5' + - '"label1" not in output_remove_nonexist_label_2.node.Spec.Labels' + - 'output_remove_nonexist_label_2.node.Spec.Labels.label3 == "value3333"' + - 'output_remove_nonexist_label_2.node.Spec.Labels.label5 == "value5"' + +#################################################################### +## Remove multiple labels ########################################## +#################################################################### + + - name: Try to remove two existing labels from swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label2 + - label3 + check_mode: yes + register: output_remove_label_1 + + - name: Try to remove two existing labels from swarm node + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label2 + - label3 + register: output_remove_label_2 + + - name: Try to remove two existing labels from swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label2 + - label3 + register: output_remove_label_3 + + - name: Try to remove two existing labels from swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label2 + - label3 + check_mode: yes + register: output_remove_label_4 + + - name: assert removing multiple labels from swarm node + assert: + that: + - 'output_remove_label_1 is changed' + - 'output_remove_label_2 is changed' + - 'output_remove_label_3 is not changed' + - 'output_remove_label_4 is not changed' + - 'output_remove_label_2.node.Spec.Labels | length == 3' + - '"label1" not in output_remove_label_2.node.Spec.Labels' + - '"label2" not in output_remove_label_2.node.Spec.Labels' + - 'output_remove_label_2.node.Spec.Labels.label5 == "value5"' + +#################################################################### +## Remove multiple labels, mix assigned and 
not assigned ########## +#################################################################### + + - name: Try to remove mix of existing and non-existing labels from swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label4 + - labelisnotthere + check_mode: yes + register: output_remove_mix_labels_1 + + - name: Try to remove mix of existing and non-existing labels from swarm node + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label4 + - labelisnotthere + register: output_remove_mix_labels_2 + + - name: Try to remove mix of existing and non-existing labels from swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label4 + - labelisnotthere + register: output_remove_mix_labels_3 + + - name: Try to remove mix of existing and non-existing labels from swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels_to_remove: + - label4 + - labelisnotthere + check_mode: yes + register: output_remove_mix_labels_4 + + - name: assert removing mix of existing and non-existing labels from swarm node + assert: + that: + - 'output_remove_mix_labels_1 is changed' + - 'output_remove_mix_labels_2 is changed' + - 'output_remove_mix_labels_3 is not changed' + - 'output_remove_mix_labels_4 is not changed' + - 'output_remove_mix_labels_2.node.Spec.Labels | length == 2' + - '"label1" not in output_remove_mix_labels_2.node.Spec.Labels' + - '"label4" not in output_remove_mix_labels_2.node.Spec.Labels' + - 'output_remove_mix_labels_2.node.Spec.Labels.label5 == "value5"' + +#################################################################### +## Add and remove labels ########################################### +#################################################################### + + - name: Try to add and remove nonoverlapping labels at the same time (check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label7: value7 + label8: value8 + labels_to_remove: + - label5 + check_mode: yes + register: output_add_del_labels_1 + + - name: Try to add and remove nonoverlapping labels at the same time + docker_node: + hostname: "{{ nodeid }}" + labels: + label7: value7 + label8: value8 + labels_to_remove: + - label5 + register: output_add_del_labels_2 + + - name: Try to add and remove nonoverlapping labels at the same time (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label7: value7 + label8: value8 + labels_to_remove: + - label5 + register: output_add_del_labels_3 + + - name: Try to add and remove nonoverlapping labels at the same time (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label7: value7 + label8: value8 + labels_to_remove: + - label5 + check_mode: yes + register: output_add_del_labels_4 + + - name: assert adding and removing nonoverlapping labels from swarm node + assert: + that: + - 'output_add_del_labels_1 is changed' + - 'output_add_del_labels_2 is changed' + - 'output_add_del_labels_3 is not changed' + - 'output_add_del_labels_4 is not changed' + - 'output_add_del_labels_2.node.Spec.Labels | length == 3' + - '"label5" not in output_add_del_labels_2.node.Spec.Labels' + - 'output_add_del_labels_2.node.Spec.Labels.label8 == "value8"' + +#################################################################### +## Add and remove labels with label in both lists ################## +#################################################################### + + - name: Try to add or update and remove overlapping labels at the same time (check) + docker_node: + hostname: 
"{{ nodeid }}" + labels: + label22: value22 + label6: value6666 + labels_to_remove: + - label6 + - label7 + check_mode: yes + register: output_add_del_overlap_lables_1 + + - name: Try to add or update and remove overlapping labels at the same time + docker_node: + hostname: "{{ nodeid }}" + labels: + label22: value22 + label6: value6666 + labels_to_remove: + - label6 + - label7 + register: output_add_del_overlap_lables_2 + + - name: Try to add or update and remove overlapping labels at the same time (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label22: value22 + label6: value6666 + labels_to_remove: + - label6 + - label7 + register: output_add_del_overlap_lables_3 + + - name: Try to add or update and remove overlapping labels at the same time (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label22: value22 + label6: value6666 + labels_to_remove: + - label6 + - label7 + check_mode: yes + register: output_add_del_overlap_lables_4 + + - name: assert adding or updating and removing overlapping labels from swarm node + assert: + that: + - 'output_add_del_overlap_lables_1 is changed' + - 'output_add_del_overlap_lables_2 is changed' + - 'output_add_del_overlap_lables_3 is not changed' + - 'output_add_del_overlap_lables_4 is not changed' + - 'output_add_del_overlap_lables_2.node.Spec.Labels | length == 3' + - '"label7" not in output_add_del_overlap_lables_2.node.Spec.Labels' + - 'output_add_del_overlap_lables_2.node.Spec.Labels.label6 == "value6666"' + - 'output_add_del_overlap_lables_2.node.Spec.Labels.label22 == "value22"' + +#################################################################### +## Replace labels ############################################# +#################################################################### + + - name: Replace labels on swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label11: value11 + label12: value12 + labels_state: replace + check_mode: yes + register: output_replace_labels_1 + + - name: Replace labels on swarm node + docker_node: + hostname: "{{ nodeid }}" + labels: + label11: value11 + label12: value12 + labels_state: replace + register: output_replace_labels_2 + + - name: Replace labels on swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels: + label11: value11 + label12: value12 + labels_state: replace + register: output_replace_labels_3 + + - name: Replace labels on swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels: + label11: value11 + label12: value12 + labels_state: replace + check_mode: yes + register: output_replace_labels_4 + + - name: assert replacing labels from swarm node + assert: + that: + - 'output_replace_labels_1 is changed' + - 'output_replace_labels_2 is changed' + - 'output_replace_labels_3 is not changed' + - 'output_replace_labels_4 is not changed' + - 'output_replace_labels_2.node.Spec.Labels | length == 2' + - '"label6" not in output_replace_labels_2.node.Spec.Labels' + - 'output_replace_labels_2.node.Spec.Labels.label12 == "value12"' + +#################################################################### +## Remove all labels ############################################# +#################################################################### + + - name: Remove all labels from swarm node (check) + docker_node: + hostname: "{{ nodeid }}" + labels_state: replace + check_mode: yes + register: output_remove_labels_1 + + - name: Remove all labels from swarm node + docker_node: + hostname: "{{ nodeid }}" + 
labels_state: replace + register: output_remove_labels_2 + + - name: Remove all labels from swarm node (idempotent) + docker_node: + hostname: "{{ nodeid }}" + labels_state: replace + register: output_remove_labels_3 + + - name: Remove all labels from swarm node (idempotent check) + docker_node: + hostname: "{{ nodeid }}" + labels_state: replace + check_mode: yes + register: output_remove_labels_4 + + - name: assert removing all labels from swarm node + assert: + that: + - 'output_remove_labels_1 is changed' + - 'output_remove_labels_2 is changed' + - 'output_remove_labels_3 is not changed' + - 'output_remove_labels_4 is not changed' + - 'output_remove_labels_2.node.Spec.Labels | length == 0' + + always: + - name: Cleanup + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases new file mode 100644 index 00000000..ca7c9128 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml new file mode 100644 index 00000000..7855f7e2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml @@ -0,0 +1,11 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_node_info.yml + # Maximum of 1.24 (docker API version for docker_node_info) and 1.25 (docker API version for docker_swarm) is 1.25 + when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_node_info tests!" 
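+# Note: the guard below deliberately skips the failure on CentOS 6 and older,
+# where a new enough docker / docker-py apparently cannot be installed, so
+# unmet requirements are expected there rather than reported as an error.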
+ when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml new file mode 100644 index 00000000..3ee5549b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml @@ -0,0 +1,88 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Try to get docker_node_info when docker is not running in swarm mode + docker_node_info: + ignore_errors: yes + register: output + + - name: assert failure when called when swarm is not in use or not run on manager node + assert: + that: + - 'output is failed' + - 'output.msg == "Error running docker swarm module: must run on swarm manager node"' + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + register: output + + - name: assert changed when create a new swarm cluster + assert: + that: + - 'output is changed' + - 'output.actions[0] | regex_search("New Swarm cluster created: ")' + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + + - name: Try to get docker_node_info when docker is running in swarm mode and as manager + docker_node_info: + register: output + + - name: assert reading docker swarm node facts + assert: + that: + - 'output.nodes | length > 0' + - 'output.nodes[0].ID is string' + + - name: Try to get docker_node_info using the self parameter + docker_node_info: + self: yes + register: output + + - name: assert reading swarm facts with list of nodes option + assert: + that: + - 'output.nodes | length == 1' + - 'output.nodes[0].ID is string' + + - name: Get local docker node name + set_fact: + localnodename: "{{ output.nodes[0].Description.Hostname }}" + + + - name: Try to get docker_node_info using the local node name as parameter + docker_node_info: + name: "{{ localnodename }}" + register: output + + - name: assert reading swarm facts and using node filter (local node name) + assert: + that: + - 'output.nodes | length == 1' + - 'output.nodes[0].ID is string' + + - name: Create random name + set_fact: + randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}" + + - name: Try to get docker_node_info using random node name as parameter + docker_node_info: + name: "{{ randomnodename }}" + register: output + + - name: assert reading swarm facts and using node filter (random node name) + assert: + that: + - 'output.nodes | length == 0' + + always: + - name: Cleanup + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git 
a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml new file mode 100644 index 00000000..16c4aa05 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml @@ -0,0 +1,68 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Create random names + set_fact: + cname: "{{ 'ansible-container-%0x' % ((2**32) | random) }}" + nname: "{{ 'ansible-network-%0x' % ((2**32) | random) }}" + vname: "{{ 'ansible-volume-%0x' % ((2**32) | random) }}" + +- block: + # Create objects to be pruned + - docker_container: + name: "{{ cname }}" + image: "{{ docker_test_image_hello_world }}" + state: present + register: container + - docker_network: + name: "{{ nname }}" + state: present + register: network + - docker_volume: + name: "{{ vname }}" + state: present + register: volume + + # Prune objects + - docker_prune: + containers: yes + images: yes + networks: yes + volumes: yes + builder_cache: "{{ docker_py_version is version('3.3.0', '>=') }}" + register: result + + # Analyze result + - debug: var=result + - assert: + that: + # containers + - container.container.Id in result.containers + - "'containers_space_reclaimed' in result" + # images + - "'images_space_reclaimed' in result" + # networks + - network.network.Name in result.networks + # volumes + - volume.volume.Name in result.volumes + - "'volumes_space_reclaimed' in result" + # builder_cache + - "'builder_cache_space_reclaimed' in result or docker_py_version is version('3.3.0', '<')" + - "'builder_cache_space_reclaimed' not in result or docker_py_version is version('3.3.0', '>=')" + + # Test with filters + - docker_prune: + images: yes + images_filters: + dangling: true + register: result + + - debug: var=result + + when: docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_prune tests!" 
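+# Note: plain pruning only needs docker-py >= 2.1.0, but pruning the builder
+# cache needs docker-py >= 3.3.0; that is why the tasks above toggle
+# builder_cache on that separate version check and assert both outcomes.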
+ when: not(docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases new file mode 100644 index 00000000..cdf1b9b1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml new file mode 100644 index 00000000..68d3df8d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml @@ -0,0 +1,10 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_secrets.yml + when: docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_secrets tests!" 
+ when: not(docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml new file mode 100644 index 00000000..2c078488 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml @@ -0,0 +1,124 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + + - name: Parameter name should be required + docker_secret: + state: present + ignore_errors: yes + register: output + + - name: assert failure when called with no name + assert: + that: + - 'output.failed' + - 'output.msg == "missing required arguments: name"' + + - name: Test parameters + docker_secret: + name: foo + state: present + ignore_errors: yes + register: output + + - name: assert failure when called with no data + assert: + that: + - 'output.failed' + - 'output.msg == "state is present but all of the following are missing: data"' + + - name: Create secret + docker_secret: + name: db_password + data: opensesame! + state: present + register: output + + - name: Create variable secret_id + set_fact: + secret_id: "{{ output.secret_id }}" + + - name: Inspect secret + command: "docker secret inspect {{ secret_id }}" + register: inspect + ignore_errors: yes + + - debug: var=inspect + + - name: assert secret creation succeeded + assert: + that: + - "'db_password' in inspect.stdout" + - "'ansible_key' in inspect.stdout" + when: inspect is not failed + - assert: + that: + - "'is too new. Maximum supported API version is' in inspect.stderr" + when: inspect is failed + + - name: Create secret again + docker_secret: + name: db_password + data: opensesame! + state: present + register: output + + - name: assert create secret is idempotent + assert: + that: + - not output.changed + + - name: Create secret again (base64) + docker_secret: + name: db_password + data: b3BlbnNlc2FtZSE= + data_is_b64: true + state: present + register: output + + - name: assert create secret (base64) is idempotent + assert: + that: + - not output.changed + + - name: Update secret + docker_secret: + name: db_password + data: newpassword! 
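+      # Note: Docker secrets are immutable, so updating the data makes the
+      # module remove and re-create the secret; the assert below therefore
+      # expects the returned secret_id to differ from the original one.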
+ state: present + register: output + + - name: assert secret was updated + assert: + that: + - output.changed + - output.secret_id != secret_id + + - name: Remove secret + docker_secret: + name: db_password + state: absent + + - name: Check that secret is removed + command: "docker secret inspect {{ secret_id }}" + register: output + ignore_errors: yes + + - name: assert secret was removed + assert: + that: + - output.failed + + always: + - name: Remove Swarm cluster + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases new file mode 100644 index 00000000..ca7c9128 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml new file mode 100644 index 00000000..5ed63f3e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml @@ -0,0 +1,10 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_stack.yml + when: docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_stack tests!" 
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml new file mode 100644 index 00000000..d1f332c2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml @@ -0,0 +1,113 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + + - name: install docker_stack python requirements + pip: + name: jsondiff,pyyaml + + - name: Create a stack without name + register: output + docker_stack: + state: present + ignore_errors: yes + + - name: assert failure when name not set + assert: + that: + - output is failed + - 'output.msg == "missing required arguments: name"' + + - name: Create a stack without compose + register: output + docker_stack: + name: test_stack + ignore_errors: yes + + - name: assert failure when compose not set + assert: + that: + - output is failed + - 'output.msg == "compose parameter must be a list containing at least one element"' + + - name: Ensure stack is absent + register: output + docker_stack: + state: absent + name: test_stack + absent_retries: 30 + + - name: Template compose files + template: + src: "{{item}}" + dest: "{{output_dir}}/" + with_items: + - stack_compose_base.yml + - stack_compose_overrides.yml + + - name: Create stack with compose file + register: output + docker_stack: + state: present + name: test_stack + compose: + - "{{output_dir}}/stack_compose_base.yml" + + - name: assert test_stack changed on stack creation with compose file + assert: + that: + - output is changed + + # FIXME: updating the stack prevents leaving the swarm on Shippable + #- name: Update stack with YAML + # register: output + # docker_stack: + # state: present + # name: test_stack + # compose: + # - "{{stack_compose_base}}" + # - "{{stack_compose_overrides}}" + # + #- name: assert test_stack correctly changed on update with yaml + # assert: + # that: + # - output is changed + # - output.stack_spec_diff == stack_update_expected_diff + + - name: Delete stack + register: output + docker_stack: + state: absent + name: test_stack + absent_retries: 30 + + - name: assert delete of existing stack returns changed + assert: + that: + - output is changed + + - name: Delete stack again + register: output + docker_stack: + state: absent + name: test_stack + absent_retries: 30 + + - name: assert state=absent idempotency + assert: + that: + - output is not changed + + always: + - name: Remove a Swarm cluster + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml new file mode 100644 index 00000000..b5162d68 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml @@ -0,0 +1,5 @@ +version: '3' +services: + busybox: + image: 
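+# Note: unlike most other targets, only docker_api_version is checked here;
+# docker_stack drives the `docker` CLI rather than docker-py, and its Python
+# requirements (jsondiff, pyyaml) are installed by the tests themselves.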
"{{ docker_test_image_busybox }}" + command: sleep 3600 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml new file mode 100644 index 00000000..1b81c71b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml @@ -0,0 +1,5 @@ +version: '3' +services: + busybox: + environment: + envvar: value diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml new file mode 100644 index 00000000..7751c86e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml @@ -0,0 +1,15 @@ +stack_compose_base: + version: '3' + services: + busybox: + image: "{{ docker_test_image_busybox }}" + command: sleep 3600 + +stack_compose_overrides: + version: '3' + services: + busybox: + environment: + envvar: value + +stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}' diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases new file mode 100644 index 00000000..ca7c9128 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml new file mode 100644 index 00000000..b499f80d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml @@ -0,0 +1,10 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_stack_info.yml + when: docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_stack tests!" 
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml new file mode 100644 index 00000000..a2a39727 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml @@ -0,0 +1,75 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Get docker_stack_info when docker is not running in swarm mode + docker_stack_info: + ignore_errors: true + register: output + + - name: Assert failure when called when swarm is not running + assert: + that: + - 'output is failed' + - '"Error running docker stack" in output.msg' + + - name: Create a swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + + - name: Get docker_stack_info when docker is running and not stack available + docker_stack_info: + register: output + + - name: Assert stack facts + assert: + that: + - 'output.results | type_debug == "list"' + - 'output.results | length == 0' + + - name: Template compose files + template: + src: "{{ item }}" + dest: "{{ output_dir }}/" + with_items: + - stack_compose_base.yml + - stack_compose_overrides.yml + + - name: Install docker_stack python requirements + pip: + name: jsondiff,pyyaml + + - name: Create stack with compose file + register: output + docker_stack: + state: present + name: test_stack + compose: + - "{{ output_dir }}/stack_compose_base.yml" + + - name: Assert test_stack changed on stack creation with compose file + assert: + that: + - output is changed + + - name: Get docker_stack_info when docker is running + docker_stack_info: + register: output + + - name: assert stack facts + assert: + that: + - 'output.results | type_debug == "list"' + - 'output.results[0].Name == "test_stack"' + - 'output.results[0].Orchestrator == "Swarm"' + - 'output.results[0].Services == "1"' + + always: + - name: Cleanup + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml new file mode 100644 index 00000000..b5162d68 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml @@ -0,0 +1,5 @@ +version: '3' +services: + busybox: + image: "{{ docker_test_image_busybox }}" + command: sleep 3600 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml new file mode 100644 index 00000000..1b81c71b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml @@ -0,0 +1,5 @@ +version: '3' +services: + busybox: + 
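+# Note: this override is meant to be layered on top of stack_compose_base.yml
+# by passing both files in docker_stack's compose list; in this target it is
+# only templated, the stack itself is deployed from the base file alone.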
environment: + envvar: value diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml new file mode 100644 index 00000000..7751c86e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml @@ -0,0 +1,15 @@ +stack_compose_base: + version: '3' + services: + busybox: + image: "{{ docker_test_image_busybox }}" + command: sleep 3600 + +stack_compose_overrides: + version: '3' + services: + busybox: + environment: + envvar: value + +stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}' diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases new file mode 100644 index 00000000..ca7c9128 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml new file mode 100644 index 00000000..0990e90b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml @@ -0,0 +1,10 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_stack_task_info.yml + when: docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_stack tests!" 
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml new file mode 100644 index 00000000..88e9eca3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml @@ -0,0 +1,84 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Get docker_stack_info when docker is not running in swarm mode + docker_stack_info: + ignore_errors: true + register: output + + - name: Assert failure when called when swarm is not running + assert: + that: + - 'output is failed' + - '"Error running docker stack" in output.msg' + + - name: Create a swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ ansible_default_ipv4.address }}" + + - name: Get docker_stack_info when docker is running and not stack available + docker_stack_info: + register: output + + - name: Assert stack facts + assert: + that: + - 'output.results | type_debug == "list"' + - 'output.results | length == 0' + + - name: Template compose files + template: + src: "{{ item }}" + dest: "{{ output_dir }}/" + with_items: + - stack_compose_base.yml + - stack_compose_overrides.yml + + - name: Install docker_stack python requirements + pip: + name: jsondiff,pyyaml + + - name: Create stack with compose file + register: output + docker_stack: + state: present + name: test_stack + compose: + - "{{ output_dir }}/stack_compose_base.yml" + + - name: Assert test_stack changed on stack creation with compose file + assert: + that: + - output is changed + + - name: Wait a bit to make sure stack is running + pause: + seconds: 5 + + - name: Get docker_stack_info when docker is running + docker_stack_info: + register: output + + - name: Get docker_stack_task_info first element + docker_stack_task_info: + name: "{{ output.results[0].Name }}" + register: output + + - name: assert stack facts + assert: + that: + - 'output.results | type_debug == "list"' + - 'output.results[0].DesiredState == "Running"' + - 'output.results[0].Image == docker_test_image_busybox' + - 'output.results[0].Name == "test_stack_busybox.1"' + + always: + - name: Cleanup + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml new file mode 100644 index 00000000..b5162d68 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml @@ -0,0 +1,5 @@ +version: '3' +services: + busybox: + image: "{{ docker_test_image_busybox }}" + command: sleep 3600 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml new file mode 100644 index 00000000..1b81c71b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml @@ -0,0 +1,5 @@ +version: '3' +services: + busybox: + environment: + envvar: value diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml new file mode 100644 index 00000000..7751c86e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml @@ -0,0 +1,15 @@ +stack_compose_base: + version: '3' + services: + busybox: + image: "{{ docker_test_image_busybox }}" + command: sleep 3600 + +stack_compose_overrides: + version: '3' + services: + busybox: + environment: + envvar: value + +stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}' diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases new file mode 100644 index 00000000..3bab2a23 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +destructive +needs/root diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml new file mode 100644 index 00000000..078660a8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - setup_docker + - setup_openssl diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml new file mode 100644 index 00000000..3e19bfcd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml @@ -0,0 +1,35 @@ +- name: CLEANUP | Leave Docker Swarm + docker_swarm: + state: absent + force: true + ignore_errors: yes + register: leave_swarm + +- name: CLEANUP | Kill Docker and cleanup + when: leave_swarm is failed + block: + - name: CLEANUP | Kill docker daemon + command: systemctl kill -s 9 docker + become: yes + + - name: CLEANUP | Clear out /var/lib/docker + shell: rm -rf /var/lib/docker/* + args: + warn: no + + - name: CLEANUP | Start docker daemon + service: + name: docker + state: started + become: yes + + - name: CLEANUP | Wait for docker daemon to be fully started + command: docker ps + register: result + until: result is success + retries: 10 + + - name: CLEANUP | Leave Docker Swarm + docker_swarm: + state: absent + force: true diff --git 
a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml new file mode 100644 index 00000000..597b71a8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml @@ -0,0 +1,23 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Run Docker Swarm tests + when: + - docker_py_version is version('1.10.0', '>=') + - docker_api_version is version('1.25', '>=') + + block: + - include_tasks: "{{ item }}" + with_fileglob: + - 'tests/*.yml' + + always: + - import_tasks: cleanup.yml + +- fail: + msg: "Too old docker / docker-py version to run docker_swarm tests!" + when: + - not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=')) + - (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml new file mode 100644 index 00000000..4e9005fc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml @@ -0,0 +1,158 @@ +- debug: + msg: Running tests/basic.yml + +#################################################################### +## Errors ########################################################## +#################################################################### +- name: Test parameters with state=join + docker_swarm: + state: join + ignore_errors: yes + register: output + +- name: assert failure when called with state=join and no remote_addrs,join_token + assert: + that: + - 'output.failed' + - 'output.msg == "state is join but all of the following are missing: remote_addrs, join_token"' + +- name: Test parameters with state=remove + docker_swarm: + state: remove + ignore_errors: yes + register: output + +- name: assert failure when called with state=remove and no node_id + assert: + that: + - 'output.failed' + - 'output.msg == "state is remove but all of the following are missing: node_id"' + +#################################################################### +## Creation ######################################################## +#################################################################### + +- name: Create a Swarm cluster (check mode) + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + check_mode: yes + diff: yes + register: output_1 + +- name: Create a Swarm cluster + docker_swarm: + state: present + 
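+    # Note: ansible_default_ipv4 can be undefined on hosts without a default
+    # route, hence the 127.0.0.1 fallback used for advertise_addr below.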
advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + diff: yes + register: output_2 + +- name: Create a Swarm cluster (idempotent) + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + diff: yes + register: output_3 + +- name: Create a Swarm cluster (idempotent, check mode) + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + check_mode: yes + diff: yes + register: output_4 + +- name: Create a Swarm cluster (force re-create) + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + force: yes + diff: yes + register: output_5 + +- name: Create a Swarm cluster (force re-create, check mode) + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + force: yes + check_mode: yes + diff: yes + register: output_6 + +- name: assert changed when create a new swarm cluster + assert: + that: + - 'output_1 is changed' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] | regex_search("New Swarm cluster created: ")' + - 'output_2.swarm_facts.JoinTokens.Manager' + - 'output_2.swarm_facts.JoinTokens.Worker' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## Removal ######################################################### +#################################################################### + +- name: Remove a Swarm cluster (check mode) + docker_swarm: + state: absent + force: true + check_mode: yes + diff: yes + register: output_1 + +- name: Remove a Swarm cluster + docker_swarm: + state: absent + force: true + diff: yes + register: output_2 + +- name: Remove a Swarm cluster (idempotent) + docker_swarm: + state: absent + force: true + diff: yes + register: output_3 + +- name: Remove a Swarm cluster (idempotent, check mode) + docker_swarm: + state: absent + force: true + check_mode: yes + diff: yes + register: output_4 + +- name: assert changed when remove a swarm cluster + assert: + that: + - 'output_1 is changed' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Node has left the swarm cluster"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + +- include_tasks: cleanup.yml diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml new file mode 100644 index 00000000..6221e3e7 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml @@ -0,0 +1,114 @@ +- debug: + msg: Running tests/options-ca.yml +- name: options-ca + when: cryptography_version.stdout is version('1.6', '>=') + block: + - name: Generate privatekey + loop: + - key1 + - key2 + loop_control: + loop_var: key + community.crypto.openssl_privatekey: + path: '{{ output_dir }}/ansible_{{ key }}.key' + size: 2048 + mode: '0666' + - name: Generate CSR + loop: + - key1 + - key2 + loop_control: + loop_var: key + community.crypto.openssl_csr: + path: '{{ output_dir }}/ansible_{{ key }}.csr' + privatekey_path: '{{ output_dir }}/ansible_{{ key }}.key' + basic_constraints: + - CA:TRUE + key_usage: + - keyCertSign + - name: Generate self-signed certificate + loop: + - key1 + - key2 + loop_control: + loop_var: key + community.crypto.openssl_certificate: + path: '{{ output_dir }}/ansible_{{ key }}.pem' + privatekey_path: '{{ output_dir }}/ansible_{{ key }}.key' + csr_path: '{{ output_dir }}/ansible_{{ key }}.csr' + provider: selfsigned + - name: signing_ca_cert and signing_ca_key (check mode) + docker_swarm: + advertise_addr: '{{ansible_default_ipv4.address | default(''127.0.0.1'')}}' + state: present + signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.pem'') }}' + signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.key'') }}' + timeout: 120 + check_mode: true + diff: true + register: output_1 + ignore_errors: true + - name: signing_ca_cert and signing_ca_key + docker_swarm: + advertise_addr: '{{ansible_default_ipv4.address | default(''127.0.0.1'')}}' + state: present + signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.pem'') }}' + signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.key'') }}' + timeout: 120 + diff: true + register: output_2 + ignore_errors: true + - name: Private key + debug: msg="{{ lookup('file', output_dir ~ '/ansible_key1.key') }}" + - name: Cert + debug: msg="{{ lookup('file', output_dir ~ '/ansible_key1.pem') }}" + - docker_swarm_info: null + register: output + ignore_errors: true + - debug: var=output + - name: signing_ca_cert and signing_ca_key (change, check mode) + docker_swarm: + state: present + signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.pem'') }}' + signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.key'') }}' + timeout: 120 + check_mode: true + diff: true + register: output_5 + ignore_errors: true + - name: signing_ca_cert and signing_ca_key (change) + docker_swarm: + state: present + signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.pem'') }}' + signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.key'') }}' + timeout: 120 + diff: true + register: output_6 + ignore_errors: true + - name: assert signing_ca_cert and signing_ca_key + assert: + that: + - output_1 is changed + - 'output_1.actions[0] | regex_search("New Swarm cluster created: ")' + - output_1.diff.before is defined + - output_1.diff.after is defined + - output_2 is changed + - 'output_2.actions[0] | regex_search("New Swarm cluster created: ")' + - output_2.diff.before is defined + - output_2.diff.after is defined + - output_5 is changed + - output_5.actions[0] == "Swarm cluster updated" + - output_5.diff.before is defined + - output_5.diff.after is defined + - output_6 is changed + - output_6.actions[0] == "Swarm cluster updated" + - output_6.diff.before is defined + - output_6.diff.after is defined + when: 
docker_py_version is version('2.6.0', '>=') + - assert: + that: + - output_1 is failed + - ('version is ' ~ docker_py_version ~ ' ') in output_1.msg + - '''Minimum version required is 2.6.0 '' in output_1.msg' + when: docker_py_version is version('2.6.0', '<') + - include_tasks: cleanup.yml diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml new file mode 100644 index 00000000..c23ab3a6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml @@ -0,0 +1,1158 @@ +- debug: + msg: Running tests/options.yml + +- name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + name: default + diff: yes + +#################################################################### +## autolock_managers ############################################### +#################################################################### + +- name: autolock_managers (check mode) + docker_swarm: + state: present + autolock_managers: yes + check_mode: yes + diff: yes + register: output_1 + ignore_errors: yes + +- name: autolock_managers + docker_swarm: + state: present + autolock_managers: yes + diff: yes + register: output_2 + ignore_errors: yes + +- name: autolock_managers (idempotent) + docker_swarm: + state: present + autolock_managers: yes + diff: yes + register: output_3 + ignore_errors: yes + +- name: autolock_managers (idempotent, check mode) + docker_swarm: + state: present + autolock_managers: yes + check_mode: yes + diff: yes + register: output_4 + ignore_errors: yes + +- name: autolock_managers (change, check mode) + docker_swarm: + state: present + autolock_managers: no + check_mode: yes + diff: yes + register: output_5 + ignore_errors: yes + +- name: autolock_managers (change) + docker_swarm: + state: present + autolock_managers: no + diff: yes + register: output_6 + ignore_errors: yes + +- name: autolock_managers (force new swarm) + docker_swarm: + state: present + force: yes + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + autolock_managers: yes + diff: yes + register: output_7 + ignore_errors: yes + +- name: assert autolock_managers changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + when: docker_py_version is version('2.6.0', '>=') + +- name: assert UnlockKey in swarm_facts + assert: + that: + - 'output_2.swarm_facts.UnlockKey' 
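+      # Note: reading the unlock key needs docker-py >= 2.7.0 (see the when:
+      # below); only runs that actually enabled autolock on the cluster
+      # (output_2 and the forced re-create output_7) report a key.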
+ - 'output_3.swarm_facts.UnlockKey is none' + - 'output_6.swarm_facts.UnlockKey is none' + - 'output_7.swarm_facts.UnlockKey' + when: docker_py_version is version('2.7.0', '>=') + +- assert: + that: + - output_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg" + - "'Minimum version required is 2.6.0 ' in output_1.msg" + when: docker_py_version is version('2.6.0', '<') + +#################################################################### +## ca_force_rotate ################################################# +#################################################################### + +- name: ca_force_rotate (check mode) + docker_swarm: + state: present + ca_force_rotate: 1 + check_mode: yes + diff: yes + register: output_1 + ignore_errors: yes + +- name: ca_force_rotate + docker_swarm: + state: present + ca_force_rotate: 1 + diff: yes + register: output_2 + ignore_errors: yes + +- name: ca_force_rotate (idempotent) + docker_swarm: + state: present + ca_force_rotate: 1 + diff: yes + register: output_3 + ignore_errors: yes + +- name: ca_force_rotate (idempotent, check mode) + docker_swarm: + state: present + ca_force_rotate: 1 + check_mode: yes + diff: yes + register: output_4 + ignore_errors: yes + +- name: ca_force_rotate (change, check mode) + docker_swarm: + state: present + ca_force_rotate: 0 + check_mode: yes + diff: yes + register: output_5 + ignore_errors: yes + +- name: ca_force_rotate (change) + docker_swarm: + state: present + ca_force_rotate: 0 + diff: yes + register: output_6 + ignore_errors: yes + +- name: assert ca_force_rotate changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + when: docker_py_version is version('2.6.0', '>=') +- assert: + that: + - output_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg" + - "'Minimum version required is 2.6.0 ' in output_1.msg" + when: docker_py_version is version('2.6.0', '<') + +#################################################################### +## dispatcher_heartbeat_period ##################################### +#################################################################### + +- name: dispatcher_heartbeat_period (check mode) + docker_swarm: + state: present + dispatcher_heartbeat_period: 10 + check_mode: yes + diff: yes + register: output_1 + +- name: dispatcher_heartbeat_period + docker_swarm: + state: present + dispatcher_heartbeat_period: 10 + diff: yes + register: output_2 + +- name: dispatcher_heartbeat_period (idempotent) + docker_swarm: + state: present + dispatcher_heartbeat_period: 10 + diff: yes + register: output_3 + +- name: dispatcher_heartbeat_period (idempotent, check mode) + docker_swarm: 
+ state: present + dispatcher_heartbeat_period: 10 + check_mode: yes + diff: yes + register: output_4 + +- name: dispatcher_heartbeat_period (change, check mode) + docker_swarm: + state: present + dispatcher_heartbeat_period: 23 + check_mode: yes + diff: yes + register: output_5 + +- name: dispatcher_heartbeat_period (change) + docker_swarm: + state: present + dispatcher_heartbeat_period: 23 + diff: yes + register: output_6 + +- name: assert dispatcher_heartbeat_period changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## election_tick ################################################### +#################################################################### + +- name: election_tick (check mode) + docker_swarm: + state: present + election_tick: 20 + check_mode: yes + diff: yes + register: output_1 + +- name: election_tick + docker_swarm: + state: present + election_tick: 20 + diff: yes + register: output_2 + +- name: election_tick (idempotent) + docker_swarm: + state: present + election_tick: 20 + diff: yes + register: output_3 + +- name: election_tick (idempotent, check mode) + docker_swarm: + state: present + election_tick: 20 + check_mode: yes + diff: yes + register: output_4 + +- name: election_tick (change, check mode) + docker_swarm: + state: present + election_tick: 5 + check_mode: yes + diff: yes + register: output_5 + +- name: election_tick (change) + docker_swarm: + state: present + election_tick: 5 + diff: yes + register: output_6 + +- name: assert election_tick changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## heartbeat_tick ################################################## 
+#################################################################### + +- name: heartbeat_tick (check mode) + docker_swarm: + state: present + heartbeat_tick: 2 + check_mode: yes + diff: yes + register: output_1 + +- name: heartbeat_tick + docker_swarm: + state: present + heartbeat_tick: 2 + diff: yes + register: output_2 + +- name: heartbeat_tick (idempotent) + docker_swarm: + state: present + heartbeat_tick: 2 + diff: yes + register: output_3 + +- name: heartbeat_tick (idempotent, check mode) + docker_swarm: + state: present + heartbeat_tick: 2 + check_mode: yes + diff: yes + register: output_4 + +- name: heartbeat_tick (change, check mode) + docker_swarm: + state: present + heartbeat_tick: 3 + check_mode: yes + diff: yes + register: output_5 + +- name: heartbeat_tick (change) + docker_swarm: + state: present + heartbeat_tick: 3 + diff: yes + register: output_6 + +- name: assert heartbeat_tick changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## keep_old_snapshots ############################################## +#################################################################### +- name: keep_old_snapshots (check mode) + docker_swarm: + state: present + keep_old_snapshots: 1 + check_mode: yes + diff: yes + register: output_1 + +- name: keep_old_snapshots + docker_swarm: + state: present + keep_old_snapshots: 1 + diff: yes + register: output_2 + +- name: keep_old_snapshots (idempotent) + docker_swarm: + state: present + keep_old_snapshots: 1 + diff: yes + register: output_3 + +- name: keep_old_snapshots (idempotent, check mode) + docker_swarm: + state: present + keep_old_snapshots: 1 + check_mode: yes + diff: yes + register: output_4 + +- name: keep_old_snapshots (change, check mode) + docker_swarm: + state: present + keep_old_snapshots: 2 + check_mode: yes + diff: yes + register: output_5 + +- name: keep_old_snapshots (change) + docker_swarm: + state: present + keep_old_snapshots: 2 + diff: yes + register: output_6 + +- name: assert keep_old_snapshots changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is 
defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## labels ########################################################## +#################################################################### +- name: labels (check mode) + docker_swarm: + state: present + labels: + a: v1 + b: v2 + check_mode: yes + diff: yes + register: output_1 + ignore_errors: yes + +- name: labels + docker_swarm: + state: present + labels: + a: v1 + b: v2 + diff: yes + register: output_2 + ignore_errors: yes + +- name: labels (idempotent) + docker_swarm: + state: present + labels: + a: v1 + b: v2 + diff: yes + register: output_3 + ignore_errors: yes + +- name: labels (idempotent, check mode) + docker_swarm: + state: present + labels: + a: v1 + b: v2 + check_mode: yes + diff: yes + register: output_4 + ignore_errors: yes + +- name: labels (change, check mode) + docker_swarm: + state: present + labels: + a: v1 + c: v3 + check_mode: yes + diff: yes + register: output_5 + ignore_errors: yes + +- name: labels (change) + docker_swarm: + state: present + labels: + a: v1 + c: v3 + diff: yes + register: output_6 + ignore_errors: yes + +- name: labels (not specifying, check mode) + docker_swarm: + state: present + check_mode: yes + diff: yes + register: output_7 + ignore_errors: yes + +- name: labels (not specifying) + docker_swarm: + state: present + diff: yes + register: output_8 + ignore_errors: yes + +- name: labels (idempotency, check that labels are still there) + docker_swarm: + state: present + labels: + a: v1 + c: v3 + diff: yes + register: output_9 + ignore_errors: yes + +- name: labels (empty, check mode) + docker_swarm: + state: present + labels: {} + check_mode: yes + diff: yes + register: output_10 + ignore_errors: yes + +- name: labels (empty) + docker_swarm: + state: present + labels: {} + diff: yes + register: output_11 + ignore_errors: yes + +- name: labels (empty, idempotent, check mode) + docker_swarm: + state: present + labels: {} + check_mode: yes + diff: yes + register: output_12 + ignore_errors: yes + +- name: labels (empty, idempotent) + docker_swarm: + state: present + labels: {} + diff: yes + register: output_13 + ignore_errors: yes + +- name: assert labels changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + - 'output_7 is not changed' + - 
'output_7.actions[0] == "No modification"' + - 'output_7.diff.before is defined' + - 'output_7.diff.after is defined' + - 'output_8 is not changed' + - 'output_8.actions[0] == "No modification"' + - 'output_8.diff.before is defined' + - 'output_8.diff.after is defined' + - 'output_9 is not changed' + - 'output_9.actions[0] == "No modification"' + - 'output_9.diff.before is defined' + - 'output_9.diff.after is defined' + - 'output_10 is changed' + - 'output_10.actions[0] == "Swarm cluster updated"' + - 'output_10.diff.before is defined' + - 'output_10.diff.after is defined' + - 'output_11 is changed' + - 'output_11.actions[0] == "Swarm cluster updated"' + - 'output_11.diff.before is defined' + - 'output_11.diff.after is defined' + - 'output_12 is not changed' + - 'output_12.actions[0] == "No modification"' + - 'output_12.diff.before is defined' + - 'output_12.diff.after is defined' + - 'output_13 is not changed' + - 'output_13.actions[0] == "No modification"' + - 'output_13.diff.before is defined' + - 'output_13.diff.after is defined' + when: docker_py_version is version('2.6.0', '>=') +- assert: + that: + - output_1 is failed + - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg" + - "'Minimum version required is 2.6.0 ' in output_1.msg" + when: docker_py_version is version('2.6.0', '<') + +#################################################################### +## log_entries_for_slow_followers ################################## +#################################################################### +- name: log_entries_for_slow_followers (check mode) + docker_swarm: + state: present + log_entries_for_slow_followers: 42 + check_mode: yes + diff: yes + register: output_1 + +- name: log_entries_for_slow_followers + docker_swarm: + state: present + log_entries_for_slow_followers: 42 + diff: yes + register: output_2 + +- name: log_entries_for_slow_followers (idempotent) + docker_swarm: + state: present + log_entries_for_slow_followers: 42 + diff: yes + register: output_3 + +- name: log_entries_for_slow_followers (idempotent, check mode) + docker_swarm: + state: present + log_entries_for_slow_followers: 42 + check_mode: yes + diff: yes + register: output_4 + +- name: log_entries_for_slow_followers (change, check mode) + docker_swarm: + state: present + log_entries_for_slow_followers: 23 + check_mode: yes + diff: yes + register: output_5 + +- name: log_entries_for_slow_followers (change) + docker_swarm: + state: present + log_entries_for_slow_followers: 23 + diff: yes + register: output_6 + +- name: assert log_entries_for_slow_followers changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + 
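+# ------------------------------------------------------------------
+# The blocks above repeat one test pattern per option: check mode,
+# apply, idempotency, idempotent check mode, change in check mode,
+# change. For reference, a minimal sketch (not part of the test
+# suite; values are the illustrative ones used above and the
+# register name is hypothetical) combining the raft and dispatcher
+# options exercised so far in a single docker_swarm task:
+#
+# - name: Tune swarm raft and dispatcher settings (sketch)
+#   docker_swarm:
+#     state: present
+#     dispatcher_heartbeat_period: 10
+#     election_tick: 20
+#     heartbeat_tick: 2
+#     keep_old_snapshots: 1
+#     log_entries_for_slow_followers: 42
+#   diff: yes
+#   register: swarm_tuning
+# ------------------------------------------------------------------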
+#################################################################### +## name ############################################################ +#################################################################### +- name: name (idempotent, check mode) + docker_swarm: + state: present + name: default + check_mode: yes + diff: yes + register: output_1 + +- name: name (idempotent) + docker_swarm: + state: present + name: default + diff: yes + register: output_2 + +# The name 'default' is hardcoded in docker swarm. Trying to change +# it causes a failure. This might change in the future, so we also +# accept a change for this test. +- name: name (change, should fail) + docker_swarm: + state: present + name: foobar + diff: yes + register: output_3 + ignore_errors: yes + +- name: assert name changes + assert: + that: + - 'output_1 is not changed' + - 'output_1.actions[0] == "No modification"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is not changed' + - 'output_2.actions[0] == "No modification"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is failed or output_3 is changed' + +#################################################################### +## node_cert_expiry ################################################ +#################################################################### +- name: node_cert_expiry (check mode) + docker_swarm: + state: present + node_cert_expiry: 7896000000000000 + check_mode: yes + diff: yes + register: output_1 + +- name: node_cert_expiry + docker_swarm: + state: present + node_cert_expiry: 7896000000000000 + diff: yes + register: output_2 + +- name: node_cert_expiry (idempotent) + docker_swarm: + state: present + node_cert_expiry: 7896000000000000 + diff: yes + register: output_3 + +- name: node_cert_expiry (idempotent, check mode) + docker_swarm: + state: present + node_cert_expiry: 7896000000000000 + check_mode: yes + diff: yes + register: output_4 + +- name: node_cert_expiry (change, check mode) + docker_swarm: + state: present + node_cert_expiry: 8766000000000000 + check_mode: yes + diff: yes + register: output_5 + +- name: node_cert_expiry (change) + docker_swarm: + state: present + node_cert_expiry: 8766000000000000 + diff: yes + register: output_6 + +- name: assert node_cert_expiry changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## rotate_manager_token ############################################ +#################################################################### +- name: rotate_manager_token (true, check mode) + 
docker_swarm: + state: present + rotate_manager_token: yes + check_mode: yes + diff: yes + register: output_1 + +- name: rotate_manager_token (true) + docker_swarm: + state: present + rotate_manager_token: yes + diff: yes + register: output_2 + +- name: rotate_manager_token (false, idempotent) + docker_swarm: + state: present + rotate_manager_token: no + diff: yes + register: output_3 + +- name: rotate_manager_token (false, check mode) + docker_swarm: + state: present + rotate_manager_token: no + check_mode: yes + diff: yes + register: output_4 + +- name: assert rotate_manager_token changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + +#################################################################### +## rotate_worker_token ############################################# +#################################################################### +- name: rotate_worker_token (true, check mode) + docker_swarm: + state: present + rotate_worker_token: yes + check_mode: yes + diff: yes + register: output_1 + +- name: rotate_worker_token (true) + docker_swarm: + state: present + rotate_worker_token: yes + diff: yes + register: output_2 + +- name: rotate_worker_token (false, idempotent) + docker_swarm: + state: present + rotate_worker_token: no + diff: yes + register: output_3 + +- name: rotate_worker_token (false, check mode) + docker_swarm: + state: present + rotate_worker_token: no + check_mode: yes + diff: yes + register: output_4 + +- name: assert rotate_worker_token changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + +#################################################################### +## snapshot_interval ############################################### +#################################################################### +- name: snapshot_interval (check mode) + docker_swarm: + state: present + snapshot_interval: 12345 + check_mode: yes + diff: yes + register: output_1 + +- name: snapshot_interval + docker_swarm: + state: present + snapshot_interval: 12345 + diff: yes + register: output_2 + +- name: snapshot_interval (idempotent) + docker_swarm: + state: present + snapshot_interval: 12345 + diff: yes + register: output_3 + +- name: snapshot_interval (idempotent, check mode) + docker_swarm: + state: present + snapshot_interval: 12345 + check_mode: yes + diff: yes + register: output_4 + +- name: snapshot_interval (change, check mode) + 
docker_swarm: + state: present + snapshot_interval: 54321 + check_mode: yes + diff: yes + register: output_5 + +- name: snapshot_interval (change) + docker_swarm: + state: present + snapshot_interval: 54321 + diff: yes + register: output_6 + +- name: assert snapshot_interval changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +#################################################################### +## task_history_retention_limit #################################### +#################################################################### +- name: task_history_retention_limit (check mode) + docker_swarm: + state: present + task_history_retention_limit: 23 + check_mode: yes + diff: yes + register: output_1 + +- name: task_history_retention_limit + docker_swarm: + state: present + task_history_retention_limit: 23 + diff: yes + register: output_2 + +- name: task_history_retention_limit (idempotent) + docker_swarm: + state: present + task_history_retention_limit: 23 + diff: yes + register: output_3 + +- name: task_history_retention_limit (idempotent, check mode) + docker_swarm: + state: present + task_history_retention_limit: 23 + check_mode: yes + diff: yes + register: output_4 + +- name: task_history_retention_limit (change, check mode) + docker_swarm: + state: present + task_history_retention_limit: 7 + check_mode: yes + diff: yes + register: output_5 + +- name: task_history_retention_limit (change) + docker_swarm: + state: present + task_history_retention_limit: 7 + diff: yes + register: output_6 + +- name: assert task_history_retention_limit changes + assert: + that: + - 'output_1 is changed' + - 'output_1.actions[0] == "Swarm cluster updated"' + - 'output_1.diff.before is defined' + - 'output_1.diff.after is defined' + - 'output_2 is changed' + - 'output_2.actions[0] == "Swarm cluster updated"' + - 'output_2.diff.before is defined' + - 'output_2.diff.after is defined' + - 'output_3 is not changed' + - 'output_3.actions[0] == "No modification"' + - 'output_3.diff.before is defined' + - 'output_3.diff.after is defined' + - 'output_4 is not changed' + - 'output_4.actions[0] == "No modification"' + - 'output_4.diff.before is defined' + - 'output_4.diff.after is defined' + - 'output_5 is changed' + - 'output_5.actions[0] == "Swarm cluster updated"' + - 'output_5.diff.before is defined' + - 'output_5.diff.after is defined' + - 'output_6 is changed' + - 'output_6.actions[0] == "Swarm cluster updated"' + - 'output_6.diff.before is defined' + - 'output_6.diff.after is defined' + +- include_tasks: cleanup.yml diff --git 
a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml new file mode 100644 index 00000000..a900953e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml @@ -0,0 +1,90 @@ +- debug: + msg: Running tests/remote-addr-pool.yml + +#################################################################### +## default_addr_pool ############################################### +#################################################################### + +- name: default_addr_pool + docker_swarm: + state: present + default_addr_pool: + - "2.0.0.0/16" + diff: yes + register: output_1 + ignore_errors: yes + +- name: default_addr_pool (idempotent) + docker_swarm: + state: present + default_addr_pool: + - "2.0.0.0/16" + diff: yes + register: output_2 + ignore_errors: yes + +- name: assert default_addr_pool + assert: + that: + - 'output_1 is changed' + - 'output_2 is not changed' + - 'output_2.swarm_facts.DefaultAddrPool == ["2.0.0.0/16"]' + when: + - docker_api_version is version('1.39', '>=') + - docker_py_version is version('4.0.0', '>=') + +- name: assert default_addr_pool failed when unsupported + assert: + that: + - 'output_1 is failed' + - "'Minimum version required' in output_1.msg" + when: docker_api_version is version('1.39', '<') or + docker_py_version is version('4.0.0', '<') + +#################################################################### +## subnet_size ##################################################### +#################################################################### +- name: Leave swarm + docker_swarm: + state: absent + force: yes + default_addr_pool: + - "2.0.0.0/16" + diff: yes + +- name: subnet_size + docker_swarm: + state: present + force: yes + subnet_size: 26 + diff: yes + register: output_1 + ignore_errors: yes + +- name: subnet_size (idempotent) + docker_swarm: + state: present + subnet_size: 26 + diff: yes + register: output_2 + ignore_errors: yes + +- name: assert subnet_size + assert: + that: + - 'output_1 is changed' + - 'output_2 is not changed' + - 'output_2.swarm_facts.SubnetSize == 26' + when: + - docker_api_version is version('1.39', '>=') + - docker_py_version is version('4.0.0', '>=') + +- name: assert subnet_size failed when unsupported + assert: + that: + - output_1 is failed + - "'Minimum version required' in output_1.msg" + when: docker_api_version is version('1.39', '<') or + docker_py_version is version('4.0.0', '<') + +- include_tasks: cleanup.yml diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases new file mode 100644 index 00000000..6eae8bd8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml new file mode 100644 index 00000000..1421701f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml @@ -0,0 +1,11 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_swarm_info.yml + # Maximum of 1.24 (docker API version for docker_swarm_info) and 1.25 (docker API version for docker_swarm) is 1.25 + when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_swarm_info tests!" + when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml new file mode 100644 index 00000000..349d7cc5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml @@ -0,0 +1,190 @@ +--- +- block: + - name: Make sure we're not already using Docker swarm + docker_swarm: + state: absent + force: true + + - name: Try to get docker_swarm_info when docker is not running in swarm mode + docker_swarm_info: + ignore_errors: yes + register: output + + - name: assert failure when called when swarm is not in use or not run on manager node + assert: + that: + - 'output is failed' + - 'output.msg == "Error running docker swarm module: must run on swarm manager node"' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == false' + - 'output.docker_swarm_manager == false' + - 'output.swarm_unlock_key is not defined' + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + register: output + + - name: assert changed when creating a new swarm cluster + assert: + that: + - 'output is changed' + - 'output.actions[0] | regex_search("New Swarm cluster created: ")' + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + + - name: Try to get docker_swarm_info when docker is running in swarm mode and as manager + docker_swarm_info: + register: output + + - name: assert reading docker swarm facts + assert: + that: + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + - 'output.swarm_facts.ID' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + - 'output.swarm_unlock_key is not defined' + + - name: Try to get docker_swarm_info and list of
nodes when docker is running in swarm mode and as manager + docker_swarm_info: + nodes: yes + register: output + + - name: assert reading swarm facts with list of nodes option + assert: + that: + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + - 'output.swarm_facts.ID' + - 'output.nodes[0].ID is string' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + - 'output.swarm_unlock_key is not defined' + + - name: Get local docker node name + set_fact: + localnodename: "{{ output.nodes[0].Hostname }}" + + - name: Try to get docker_swarm_info and verbose list of nodes when docker is running in swarm mode and as manager + docker_swarm_info: + nodes: yes + verbose_output: yes + register: output + + - name: assert reading swarm facts with list of nodes and verbose output options + assert: + that: + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + - 'output.swarm_facts.ID' + - 'output.nodes[0].ID is string' + - 'output.nodes[0].CreatedAt' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + - 'output.swarm_unlock_key is not defined' + + - name: Try to get docker_swarm_info and list of nodes with filters providing existing node name + docker_swarm_info: + nodes: yes + nodes_filters: + name: "{{ localnodename }}" + register: output + + - name: assert reading swarm facts and using node filter (existing node name) + assert: + that: + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + - 'output.swarm_facts.ID' + - 'output.nodes | length == 1' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + - 'output.swarm_unlock_key is not defined' + + - name: Create random name + set_fact: + randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}" + + - name: Try to get docker_swarm_info and list of nodes with filters providing non-existing random node name + docker_swarm_info: + nodes: yes + nodes_filters: + name: "{{ randomnodename }}" + register: output + + - name: assert reading swarm facts and using node filter (random node name) + assert: + that: + - 'output.swarm_facts.JoinTokens.Manager' + - 'output.swarm_facts.JoinTokens.Worker' + - 'output.swarm_facts.ID' + - 'output.nodes | length == 0' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + - 'output.swarm_unlock_key is not defined' + + - name: Try to get docker_swarm_info and swarm_unlock_key on a swarm that is not locked + docker_swarm_info: + unlock_key: yes + register: output + ignore_errors: yes + + - name: assert reading swarm facts and non-existing swarm unlock key + assert: + that: + - 'output.swarm_unlock_key is none' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + when: docker_py_version is version('2.7.0', '>=') + - assert: + that: + - output is failed + - "('version is ' ~ docker_py_version ~ ' ') in output.msg" + - "'Minimum version required is 2.7.0 ' in output.msg" + when: docker_py_version is version('2.7.0', '<') + + - name: Update swarm cluster to be locked + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + autolock_managers: true + register: autolock_managers_update_output + ignore_errors: yes + + -
name: Try to get docker_swarm_info and swarm_unlock_key + docker_swarm_info: + unlock_key: yes + register: output + ignore_errors: yes + + - name: assert reading swarm facts and swarm unlock key + assert: + that: + - 'output.swarm_unlock_key is string' + - 'output.swarm_unlock_key == autolock_managers_update_output.swarm_facts.UnlockKey' + - 'output.can_talk_to_docker == true' + - 'output.docker_swarm_active == true' + - 'output.docker_swarm_manager == true' + when: docker_py_version is version('2.7.0', '>=') + - assert: + that: + - output is failed + - "('version is ' ~ docker_py_version ~ ' ') in output.msg" + - "'Minimum version required is 2.7.0 ' in output.msg" + when: docker_py_version is version('2.7.0', '<') + + always: + - name: Cleanup + docker_swarm: + state: absent + force: true diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases new file mode 100644 index 00000000..cdf1b9b1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-1 b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-1 new file mode 100644 index 00000000..b15f1b64 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-1 @@ -0,0 +1,2 @@ +TEST3=val3 +TEST4=val4 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-2 b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-2 new file mode 100644 index 00000000..eff99aca --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-2 @@ -0,0 +1,2 @@ +TEST3=val5 +TEST5=val5 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml new file mode 100644 index 00000000..4cb69597 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml @@ -0,0 +1,80 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + 
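+# ------------------------------------------------------------------
+# The prefix created below combines Jinja2's random filter with
+# printf-style formatting: ((2**32) | random) draws an integer in
+# [0, 2**32) and '%0x' renders it as lowercase hex, giving names
+# such as ansible-test-5f3a9c21. A standalone sketch of the same
+# idea (the fact and task names here are illustrative only):
+#
+# - name: Demonstrate the random hex suffix
+#   set_fact:
+#     demo_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+#
+# - debug:
+#     msg: "Generated prefix: {{ demo_prefix }}"
+# ------------------------------------------------------------------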
+ +# Create random name prefix (for containers, networks, ...) +- name: Create random name prefix + set_fact: + name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + service_names: [] + network_names: [] + config_names: [] + secret_names: [] + volume_names: [] + +- debug: + msg: "Using container name prefix {{ name_prefix }}" + +# Run the tests +- block: + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" + + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" + + always: + - name: Make sure all services are removed + docker_swarm_service: + name: "{{ item }}" + state: absent + loop: "{{ service_names }}" + ignore_errors: yes + + - name: Make sure all networks are removed + docker_network: + name: "{{ item }}" + state: absent + force: yes + loop: "{{ network_names }}" + ignore_errors: yes + + - name: Make sure all configs are removed + docker_config: + name: "{{ item }}" + state: absent + force: yes + loop: "{{ config_names }}" + ignore_errors: yes + + - name: Make sure all volumes are removed + docker_volume: + name: "{{ item }}" + state: absent + loop: "{{ volume_names }}" + ignore_errors: yes + + - name: Make sure all secrets are removed + docker_secret: + name: "{{ item }}" + state: absent + force: yes + loop: "{{ secret_names }}" + ignore_errors: yes + + - name: Make sure swarm is removed + docker_swarm: + state: absent + force: yes + ignore_errors: yes + # Maximum of 1.24 (docker API version for docker_swarm_service) and 1.25 (docker API version for docker_swarm) is 1.25 + when: docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_swarm_service tests!" 
+ when: not(docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml new file mode 100644 index 00000000..a2999370 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml @@ -0,0 +1,3 @@ +--- +- name: "Loading tasks from {{ item }}" + include_tasks: "{{ item }}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml new file mode 100644 index 00000000..ad4d5695 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml @@ -0,0 +1,413 @@ +--- + +- name: Registering service and config names + set_fact: + service_name: "{{ name_prefix ~ '-configs' }}" + config_name_1: "{{ name_prefix ~ '-configs-1' }}" + config_name_2: "{{ name_prefix ~ '-configs-2' }}" + +- name: Registering config names for cleanup + set_fact: + config_names: "{{ config_names + [config_name_1, config_name_2] }}" + +- docker_config: + name: "{{ config_name_1 }}" + data: "hello" + state: present + register: "config_result_1" + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=') + +- docker_config: + name: "{{ config_name_2 }}" + data: "test" + state: present + register: "config_result_2" + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=') + +#################################################################### +## configs ######################################################### +#################################################################### + +- name: configs + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + filename: "/tmp/{{ config_name_1 }}.txt" + register: configs_1 + ignore_errors: yes + +- name: configs (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_name: "{{ config_name_1 }}" + filename: "/tmp/{{ config_name_1 }}.txt" + register: configs_2 + ignore_errors: yes + +- name: configs (add) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + filename: "/tmp/{{ config_name_1 }}.txt" + - config_name: "{{ config_name_2 }}" + filename: "/tmp/{{ config_name_2 }}.txt" + register: configs_3 + ignore_errors: yes + +- name: configs (add idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"'
+ configs: + - config_name: "{{ config_name_1 }}" + filename: "/tmp/{{ config_name_1 }}.txt" + - config_id: "{{ config_result_2.config_id|default('') }}" + config_name: "{{ config_name_2 }}" + filename: "/tmp/{{ config_name_2 }}.txt" + register: configs_4 + ignore_errors: yes + +- name: configs (add idempotency no id) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_name: "{{ config_name_1 }}" + filename: "/tmp/{{ config_name_1 }}.txt" + - config_name: "{{ config_name_2 }}" + filename: "/tmp/{{ config_name_2 }}.txt" + register: configs_5 + ignore_errors: yes + +- name: configs (add idempotency no id and re-ordered) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_name: "{{ config_name_2 }}" + filename: "/tmp/{{ config_name_2 }}.txt" + - config_name: "{{ config_name_1 }}" + filename: "/tmp/{{ config_name_1 }}.txt" + register: configs_6 + ignore_errors: yes + +- name: configs (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: [] + register: configs_7 + ignore_errors: yes + +- name: configs (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: [] + register: configs_8 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - configs_1 is changed + - configs_2 is not changed + - configs_3 is changed + - configs_4 is not changed + - configs_5 is not changed + - configs_6 is not changed + - configs_7 is changed + - configs_8 is not changed + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=') + +- assert: + that: + - configs_1 is failed + - "'Minimum version required' in configs_1.msg" + when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## configs (uid) ################################################### +#################################################################### + +- name: configs (uid int) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + uid: 1000 + register: configs_1 + ignore_errors: yes + +- name: configs (uid int idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + uid: 1000 + register: configs_2 + ignore_errors: yes + +- name: configs (uid int change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + uid: 1002 + register: configs_3 + ignore_errors: yes + +- name: configs (uid str) + 
docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + uid: "1001" + register: configs_4 + ignore_errors: yes + +- name: configs (uid str idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + uid: "1001" + register: configs_5 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no +- assert: + that: + - configs_1 is changed + - configs_2 is not changed + - configs_3 is changed + - configs_4 is changed + - configs_5 is not changed + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=') + +- assert: + that: + - configs_1 is failed + - "'Minimum version required' in configs_1.msg" + when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<') + + +#################################################################### +## configs (gid) ################################################### +#################################################################### + +- name: configs (gid int) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + gid: 1000 + register: configs_1 + ignore_errors: yes + +- name: configs (gid int idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + gid: 1000 + register: configs_2 + ignore_errors: yes + +- name: configs (gid int change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + gid: 1002 + register: configs_3 + ignore_errors: yes + +- name: configs (gid str) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + gid: "1001" + register: configs_4 + ignore_errors: yes + +- name: configs (gid str idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + gid: "1001" + register: configs_5 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no +- assert: + that: + - configs_1 is changed + - configs_2 is not changed + - configs_3 is changed + - configs_4 is changed + - configs_5 is not changed + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', 
'>=') + +- assert: + that: + - configs_1 is failed + - "'Minimum version required' in configs_1.msg" + when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## configs (mode) ################################################## +#################################################################### + +- name: configs (mode) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + mode: 0600 + register: configs_1 + ignore_errors: yes + +- name: configs (mode idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + mode: 0600 + register: configs_2 + ignore_errors: yes + +- name: configs (mode change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + configs: + - config_id: "{{ config_result_1.config_id|default('') }}" + config_name: "{{ config_name_1 }}" + mode: 0777 + register: configs_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no +- assert: + that: + - configs_1 is changed + - configs_2 is not changed + - configs_3 is changed + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=') + +- assert: + that: + - configs_1 is failed + - "'Minimum version required' in configs_1.msg" + when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +#################################################################### +#################################################################### + +- name: Delete configs + docker_config: + name: "{{ config_name }}" + state: absent + force: yes + loop: + - "{{ config_name_1 }}" + - "{{ config_name_2 }}" + loop_control: + loop_var: config_name + ignore_errors: yes + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml new file mode 100644 index 00000000..938b8a4f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml @@ -0,0 +1,135 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-logging' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + +#################################################################### +## logging.driver ################################################## +#################################################################### + +- name: logging.driver + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + 
command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + register: logging_driver_1 + +- name: logging.driver (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + register: logging_driver_2 + +- name: logging.driver (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: syslog + register: logging_driver_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - logging_driver_1 is changed + - logging_driver_2 is not changed + - logging_driver_3 is changed + +#################################################################### +## logging.options ################################################# +#################################################################### + +- name: logging_options + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + options: + labels: production_status + env: os,customer + register: logging_options_1 + +- name: logging_options (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + options: + env: os,customer + labels: production_status + register: logging_options_2 + +- name: logging_options (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + options: + env: os,customer + labels: production_status + max-file: "1" + register: logging_options_3 + +- name: logging_options (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + options: {} + register: logging_options_4 + +- name: logging_options (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + logging: + driver: json-file + options: {} + register: logging_options_5 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - logging_options_1 is changed + - logging_options_2 is not changed + - logging_options_3 is changed + - logging_options_4 is changed + - logging_options_5 is not changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml new file mode 100644 index 00000000..7d24e089 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml @@ -0,0 +1,113 @@ +--- +- block: + - name: Create a swarm service without name + register: output + docker_swarm_service: + state: present + ignore_errors: yes + + - name: assert failure when name not set + assert: + that: + - output is failed + - 'output.msg == "missing 
required arguments: name"'
+
+  - name: Remove a non-existing service
+    register: output
+    docker_swarm_service:
+      state: absent
+      name: non_existing_service
+
+  - name: assert output not changed when deleting non-existing service
+    assert:
+      that:
+        - output is not changed
+
+  - name: create sample service
+    register: output
+    docker_swarm_service:
+      name: test_service
+      endpoint_mode: dnsrr
+      image: "{{ docker_test_image_busybox }}"
+      resolve_image: no
+      args:
+        - sleep
+        - "3600"
+
+  - name: assert sample service is created
+    assert:
+      that:
+        - output is changed
+
+  - name: change service args
+    register: output
+    docker_swarm_service:
+      name: test_service
+      image: "{{ docker_test_image_busybox }}"
+      resolve_image: no
+      args:
+        - sleep
+        - "1800"
+
+  - name: assert service args are correct
+    assert:
+      that:
+        - output.swarm_service.args == ['sleep', '1800']
+
+  - name: set service mode to global
+    register: output
+    docker_swarm_service:
+      name: test_service
+      image: "{{ docker_test_image_busybox }}"
+      resolve_image: no
+      endpoint_mode: vip
+      mode: global
+      args:
+        - sleep
+        - "1800"
+
+  - name: assert service mode change caused service rebuild
+    assert:
+      that:
+        - output.rebuilt
+
+  - name: add published ports to service
+    register: output
+    docker_swarm_service:
+      name: test_service
+      image: "{{ docker_test_image_busybox }}"
+      resolve_image: no
+      mode: global
+      args:
+        - sleep
+        - "1800"
+      endpoint_mode: vip
+      publish:
+        - protocol: tcp
+          published_port: 60001
+          target_port: 60001
+        - protocol: udp
+          published_port: 60001
+          target_port: 60001
+
+  - name: fake image key as it is not predictable
+    set_fact:
+      ansible_docker_service_output: "{{ output.swarm_service|combine({'image': docker_test_image_busybox}) }}"
+
+  - name: assert service matches expectations
+    assert:
+      that:
+        - ansible_docker_service_output == service_expected_output
+
+  - name: delete sample service
+    register: output
+    docker_swarm_service:
+      name: test_service
+      state: absent
+
+  - name: assert service deletion returns changed
+    assert:
+      that:
+        - output is success
+        - output is changed
+  when: docker_api_version is version('1.24', '>=') and docker_py_version is version('3.0.0', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml
new file mode 100644
index 00000000..441547d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml
@@ -0,0 +1,601 @@
+- name: Registering service name
+  set_fact:
+    service_name: "{{ name_prefix ~ '-mounts' }}"
+    volume_name_1: "{{ name_prefix ~ '-volume-1' }}"
+    volume_name_2: "{{ name_prefix ~ '-volume-2' }}"
+
+- name: Registering service name
+  set_fact:
+    service_names: "{{ service_names + [service_name] }}"
+    volume_names: "{{ volume_names + [volume_name_1, volume_name_2] }}"
+
+- docker_volume:
+    name: "{{ volume_name }}"
+    state: present
+  loop:
+    - "{{ volume_name_1 }}"
+    - "{{ volume_name_2 }}"
+  loop_control:
+    loop_var: volume_name
+
+####################################################################
+## mounts ##########################################################
+####################################################################
+
+- name: mounts
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{
docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + register: mounts_1 + +- name: mounts (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + register: mounts_2 + +- name: mounts (add) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + - source: "/tmp/" + target: "/tmp/{{ volume_name_2 }}" + type: "bind" + register: mounts_3 + +- name: mounts (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "/tmp/" + target: "/tmp/{{ volume_name_2 }}" + type: "bind" + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + register: mounts_4 + +- name: mounts (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: [] + register: mounts_5 + +- name: mounts (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: [] + register: mounts_6 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - mounts_1 is changed + - mounts_2 is not changed + - mounts_3 is changed + - mounts_4 is not changed + - mounts_5 is changed + - mounts_6 is not changed + +#################################################################### +## mounts.readonly ################################################# +#################################################################### + +- name: mounts.readonly + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + readonly: true + register: mounts_readonly_1 + + +- name: mounts.readonly (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + readonly: true + register: mounts_readonly_2 + +- name: mounts.readonly (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + readonly: false + register: mounts_readonly_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - mounts_readonly_1 is changed + - mounts_readonly_2 is not changed + - mounts_readonly_3 is changed + +#################################################################### +## mounts.propagation ############################################## 
+#################################################################### + +- name: mounts.propagation + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "/tmp" + target: "/tmp/{{ volume_name_1 }}" + type: "bind" + propagation: "slave" + register: mounts_propagation_1 + + +- name: mounts.propagation (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "/tmp" + target: "/tmp/{{ volume_name_1 }}" + type: "bind" + propagation: "slave" + register: mounts_propagation_2 + +- name: mounts.propagation (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "/tmp" + target: "/tmp/{{ volume_name_1 }}" + type: "bind" + propagation: "rprivate" + register: mounts_propagation_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - mounts_propagation_1 is changed + - mounts_propagation_2 is not changed + - mounts_propagation_3 is changed + +#################################################################### +## mounts.labels ################################################## +#################################################################### + +- name: mounts.labels + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + labels: + mylabel: hello-world + my-other-label: hello-mars + register: mounts_labels_1 + + +- name: mounts.labels (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + labels: + mylabel: hello-world + my-other-label: hello-mars + register: mounts_labels_2 + +- name: mounts.labels (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + labels: + mylabel: hello-world + register: mounts_labels_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - mounts_labels_1 is changed + - mounts_labels_2 is not changed + - mounts_labels_3 is changed + +#################################################################### +## mounts.no_copy ################################################## +#################################################################### + +- name: mounts.no_copy + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mounts: + - source: "{{ volume_name_1 }}" + target: "/tmp/{{ volume_name_1 }}" + type: "volume" + no_copy: true + register: mounts_no_copy_1 + + +- name: mounts.no_copy (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + 
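+    # Propagation applies to bind mounts only. The values here are
+    # Docker's bind-propagation modes (shared, slave, private and their
+    # recursive r-variants); rprivate is the daemon default, but the
+    # requested value is still compared, so switching from "slave" to
+    # "rprivate" below must be reported as a change.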
    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "volume"
+        no_copy: true
+  register: mounts_no_copy_2
+
+- name: mounts.no_copy (change)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "volume"
+        no_copy: false
+  register: mounts_no_copy_3
+
+- name: cleanup
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    state: absent
+    diff: no
+
+- assert:
+    that:
+      - mounts_no_copy_1 is changed
+      - mounts_no_copy_2 is not changed
+      - mounts_no_copy_3 is changed
+
+####################################################################
+## mounts.driver_config ############################################
+####################################################################
+
+- name: mounts.driver_config
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "volume"
+        driver_config:
+          name: "nfs"
+          options:
+            addr: "127.0.0.1"
+  register: mounts_driver_config_1
+
+- name: mounts.driver_config (idempotency)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "volume"
+        driver_config:
+          name: "nfs"
+          options:
+            addr: "127.0.0.1"
+  register: mounts_driver_config_2
+
+- name: mounts.driver_config (change)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "volume"
+        driver_config:
+          name: "local"
+  register: mounts_driver_config_3
+
+- name: cleanup
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    state: absent
+    diff: no
+
+- assert:
+    that:
+      - mounts_driver_config_1 is changed
+      - mounts_driver_config_2 is not changed
+      - mounts_driver_config_3 is changed
+
+####################################################################
+## mounts.tmpfs_size ###############################################
+####################################################################
+
+- name: mounts.tmpfs_size
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+        tmpfs_size: "50M"
+  register: mounts_tmpfs_size_1
+  ignore_errors: yes
+
+- name: mounts.tmpfs_size (idempotency)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+        tmpfs_size: "50M"
+  register: mounts_tmpfs_size_2
+  ignore_errors: yes
+
+- name: mounts.tmpfs_size (change)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+        tmpfs_size: "25M"
+  register: mounts_tmpfs_size_3
+  ignore_errors: yes
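+# tmpfs mounts need docker-py 2.6.0 or newer, hence the ignore_errors
+# above and the version-gated asserts below. A minimal standalone sketch;
+# the service name and size are illustrative, using the same
+# "<number><unit>" size format as above, and source may be omitted:
+#
+# - name: tmpfs mount sketch
+#   docker_swarm_service:
+#     name: my-tmpfs-service
+#     image: "{{ docker_test_image_alpine }}"
+#     mounts:
+#       - target: /cache
+#         type: tmpfs
+#         tmpfs_size: "64M"
+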
+- name: cleanup
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    state: absent
+    diff: no
+
+- assert:
+    that:
+      - mounts_tmpfs_size_1 is changed
+      - mounts_tmpfs_size_2 is not changed
+      - mounts_tmpfs_size_3 is changed
+  when: docker_py_version is version('2.6.0', '>=')
+- assert:
+    that:
+      - mounts_tmpfs_size_1 is failed
+      - "'Minimum version required' in mounts_tmpfs_size_1.msg"
+  when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts.tmpfs_mode ###############################################
+####################################################################
+
+- name: mounts.tmpfs_mode
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+        tmpfs_mode: 0444
+  register: mounts_tmpfs_mode_1
+  ignore_errors: yes
+
+- name: mounts.tmpfs_mode (idempotency)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+        tmpfs_mode: 0444
+  register: mounts_tmpfs_mode_2
+  ignore_errors: yes
+
+- name: mounts.tmpfs_mode (change)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: "{{ volume_name_1 }}"
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+        tmpfs_mode: 0777
+  register: mounts_tmpfs_mode_3
+  ignore_errors: yes
+
+- name: cleanup
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    state: absent
+    diff: no
+
+- assert:
+    that:
+      - mounts_tmpfs_mode_1 is changed
+      - mounts_tmpfs_mode_2 is not changed
+      - mounts_tmpfs_mode_3 is changed
+  when: docker_py_version is version('2.6.0', '>=')
+- assert:
+    that:
+      - mounts_tmpfs_mode_1 is failed
+      - "'Minimum version required' in mounts_tmpfs_mode_1.msg"
+  when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts.source ###################################################
+####################################################################
+
+- name: mounts.source (empty for tmpfs)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: ""
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+  register: mounts_tmpfs_source_1
+  ignore_errors: yes
+
+- name: mounts.source (empty for tmpfs idempotency)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - source: ""
+        target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+  register: mounts_tmpfs_source_2
+  ignore_errors: yes
+
+- name: mounts.source (not specified for tmpfs idempotency)
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    image: "{{ docker_test_image_alpine }}"
+    resolve_image: no
+    command: '/bin/sh -v -c "sleep 10m"'
+    mounts:
+      - target: "/tmp/{{ volume_name_1 }}"
+        type: "tmpfs"
+  register: mounts_tmpfs_source_3
+  ignore_errors: yes
+
+- name: cleanup
+  docker_swarm_service:
+    name: "{{ service_name }}"
+    state: absent
+    diff: no
+
+- assert:
+    that:
+      - mounts_tmpfs_source_1 is
changed + - mounts_tmpfs_source_2 is not changed + - mounts_tmpfs_source_3 is not changed + when: docker_py_version is version('2.6.0', '>=') +- assert: + that: + - mounts_tmpfs_source_1 is failed + - "'Minimum version required' in mounts_tmpfs_source_1.msg" + when: docker_py_version is version('2.6.0', '<') + +#################################################################### +#################################################################### +#################################################################### + +- name: Delete volumes + docker_volume: + name: "{{ volume_name }}" + state: absent + loop: + - "{{ volume_name_1 }}" + - "{{ volume_name_2 }}" + loop_control: + loop_var: volume_name + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml new file mode 100644 index 00000000..26a822b6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml @@ -0,0 +1,450 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-networks' }}" + network_name_1: "{{ name_prefix ~ '-network-1' }}" + network_name_2: "{{ name_prefix ~ '-network-2' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + network_names: "{{ network_names + [network_name_1, network_name_2] }}" + +- docker_network: + name: "{{ network_name }}" + driver: "overlay" + state: present + loop: + - "{{ network_name_1 }}" + - "{{ network_name_2 }}" + loop_control: + loop_var: network_name + +##################################################################### +## networks ######################################################### +##################################################################### + +- name: networks + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_1 }}" + register: networks_1 + +- name: networks (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_1 }}" + register: networks_2 + +- name: networks (dict idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + register: networks_3 + +- name: networks (change more) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_1 }}" + - "{{ network_name_2 }}" + register: networks_4 + +- name: networks (change more idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_1 }}" + - "{{ network_name_2 }}" + register: networks_5 + +- name: networks (change more dict idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + 
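+    # Entries in "networks" may be plain network names or dictionaries
+    # with a "name" key (plus optional "aliases" and "options"). Both
+    # spellings can be mixed freely, and list order is not significant,
+    # as the idempotency tasks below demonstrate.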
networks: + - name: "{{ network_name_1 }}" + - name: "{{ network_name_2 }}" + register: networks_6 + +- name: networks (change more mixed idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + - "{{ network_name_2 }}" + register: networks_7 + +- name: networks (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_2 }}" + - name: "{{ network_name_1 }}" + register: networks_8 + +- name: networks (change less) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_2 }}" + register: networks_9 + +- name: networks (change less idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "{{ network_name_2 }}" + register: networks_10 + +- name: networks (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: [] + register: networks_11 + +- name: networks (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: [] + register: networks_12 + +- name: networks (unknown network) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - "idonotexist" + register: networks_13 + ignore_errors: yes + +- name: networks (missing dict key name) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - foo: "bar" + register: networks_14 + ignore_errors: yes + +- name: networks (invalid list type) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - [1, 2, 3] + register: networks_15 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - networks_1 is changed + - networks_2 is not changed + - networks_3 is not changed + - networks_4 is changed + - networks_5 is not changed + - networks_6 is not changed + - networks_7 is not changed + - networks_8 is not changed + - networks_9 is changed + - networks_10 is not changed + - networks_11 is changed + - networks_12 is not changed + - networks_13 is failed + - '"Could not find a network named: ''idonotexist''" in networks_13.msg' + - networks_14 is failed + - "'\"name\" is required when networks are passed as dictionaries.' 
in networks_14.msg" + - networks_15 is failed + - "'Only a list of strings or dictionaries are allowed to be passed as networks' in networks_15.msg" + +- assert: + that: + - networks_4.rebuilt == false + - networks_7.rebuilt == false + when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.7.0', '>=') + +- assert: + that: + - networks_4.rebuilt == true + - networks_7.rebuilt == true + when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.7.0', '<') + +#################################################################### +## networks.aliases ################################################ +#################################################################### + +- name: networks.aliases + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: + - "alias1" + - "alias2" + register: networks_aliases_1 + +- name: networks.aliases (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: + - "alias1" + - "alias2" + register: networks_aliases_2 + +- name: networks.aliases (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: + - "alias2" + - "alias1" + register: networks_aliases_3 + +- name: networks.aliases (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: + - "alias1" + register: networks_aliases_4 + +- name: networks.aliases (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: [] + register: networks_aliases_5 + +- name: networks.aliases (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: [] + register: networks_aliases_6 + +- name: networks.aliases (invalid type) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + aliases: + - [1, 2, 3] + register: networks_aliases_7 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - networks_aliases_1 is changed + - networks_aliases_2 is not changed + - networks_aliases_3 is not changed + - networks_aliases_4 is changed + - networks_aliases_5 is changed + - networks_aliases_6 is not changed + - networks_aliases_7 is failed + - "'Only strings are allowed as network aliases' in networks_aliases_7.msg" + +#################################################################### +## networks.options ################################################ +#################################################################### + +- name: networks.options + docker_swarm_service: + name: "{{ 
service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: + foo: bar + test: hello + register: networks_options_1 + +- name: networks.options (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: + foo: bar + test: hello + register: networks_options_2 + +- name: networks.options (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: + foo: bar + test: hej + register: networks_options_3 + +- name: networks.options (change less) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: + foo: bar + register: networks_options_4 + +- name: networks.options (invalid type) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: [1, 2, 3] + register: networks_options_5 + ignore_errors: yes + +- name: networks.options (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: {} + register: networks_options_6 + +- name: networks.options (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + networks: + - name: "{{ network_name_1 }}" + options: {} + register: networks_options_7 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - networks_options_1 is changed + - networks_options_2 is not changed + - networks_options_3 is changed + - networks_options_4 is changed + - networks_options_5 is failed + - "'Only dict is allowed as network options' in networks_options_5.msg" + - networks_options_6 is changed + - networks_options_7 is not changed + +#################################################################### +#################################################################### +#################################################################### + +- name: Delete networks + docker_network: + name: "{{ network_name }}" + state: absent + force: yes + loop: + - "{{ network_name_1 }}" + - "{{ network_name_2 }}" + loop_control: + loop_var: network_name + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml new file mode 100644 index 00000000..df0a5938 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml @@ -0,0 +1,1878 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-options' }}" + +- name: Registering service name + set_fact: + 
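+    # Accumulating every generated name in service_names lets the
+    # surrounding test scaffolding remove all created services again
+    # once this file has run.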
service_names: "{{ service_names + [service_name] }}" + +#################################################################### +## args ############################################################ +#################################################################### + +- name: args + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + args: + - sleep + - "3600" + register: args_1 + +- name: args (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + args: + - sleep + - "3600" + register: args_2 + +- name: args (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + args: + - sleep + - "3400" + register: args_3 + +- name: args (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + args: [] + register: args_4 + +- name: args (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + args: [] + register: args_5 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - args_1 is changed + - args_2 is not changed + - args_3 is changed + - args_4 is changed + - args_5 is not changed + +#################################################################### +## command ######################################################### +#################################################################### + +- name: command + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + register: command_1 + +- name: command (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + register: command_2 + +- name: command (less parameters) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -c "sleep 10m"' + register: command_3 + +- name: command (as list) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: + - "/bin/sh" + - "-c" + - "sleep 10m" + register: command_4 + +- name: command (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: [] + register: command_5 + +- name: command (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: [] + register: command_6 + +- name: command (string failure) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: yes + register: command_7 + ignore_errors: yes + +- name: command (list failure) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: + - "/bin/sh" + - yes + register: command_8 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - command_1 is changed + - command_2 is not changed + - command_3 is changed + - command_4 is not changed + - command_5 is changed + - command_6 is not changed + - command_7 
is failed + - command_8 is failed + +#################################################################### +## container_labels ################################################ +#################################################################### + +- name: container_labels + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + container_labels: + test_1: "1" + test_2: "2" + register: container_labels_1 + +- name: container_labels (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + container_labels: + test_1: "1" + test_2: "2" + register: container_labels_2 + +- name: container_labels (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + container_labels: + test_1: "1" + test_2: "3" + register: container_labels_3 + +- name: container_labels (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + container_labels: {} + register: container_labels_4 + +- name: container_labels (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + container_labels: {} + register: container_labels_5 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - container_labels_1 is changed + - container_labels_2 is not changed + - container_labels_3 is changed + - container_labels_4 is changed + - container_labels_5 is not changed + +#################################################################### +## dns ############################################################# +#################################################################### + +- name: dns + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns: + - 1.1.1.1 + - 8.8.8.8 + register: dns_1 + ignore_errors: yes + +- name: dns (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns: + - 1.1.1.1 + - 8.8.8.8 + register: dns_2 + ignore_errors: yes + +- name: dns_servers (changed order) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns: + - 8.8.8.8 + - 1.1.1.1 + register: dns_3 + ignore_errors: yes + +- name: dns_servers (changed elements) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns: + - 8.8.8.8 + - 9.9.9.9 + register: dns_4 + ignore_errors: yes + +- name: dns_servers (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns: [] + register: dns_5 + ignore_errors: yes + +- name: dns_servers (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns: [] + register: dns_6 + 
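+  # On hosts that do not meet the API / docker-py minimums, each dns task
+  # above fails with a "Minimum version required" message instead of
+  # applying anything; the paired asserts below cover both the supported
+  # and the unsupported path.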
ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - dns_1 is changed + - dns_2 is not changed + - dns_3 is changed + - dns_4 is changed + - dns_5 is changed + - dns_6 is not changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - dns_1 is failed + - "'Minimum version required' in dns_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## dns_options ##################################################### +#################################################################### + +- name: dns_options + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_options: + - "timeout:10" + - rotate + register: dns_options_1 + ignore_errors: yes + +- name: dns_options (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_options: + - "timeout:10" + - rotate + register: dns_options_2 + ignore_errors: yes + +- name: dns_options (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_options: + - "timeout:10" + - no-check-names + register: dns_options_3 + ignore_errors: yes + +- name: dns_options (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_options: + - no-check-names + - "timeout:10" + register: dns_options_4 + ignore_errors: yes + +- name: dns_options (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_options: [] + register: dns_options_5 + ignore_errors: yes + +- name: dns_options (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_options: [] + register: dns_options_6 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - dns_options_1 is changed + - dns_options_2 is not changed + - dns_options_3 is changed + - dns_options_4 is not changed + - dns_options_5 is changed + - dns_options_6 is not changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - dns_options_1 is failed + - "'Minimum version required' in dns_options_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## dns_search ###################################################### +#################################################################### + +- name: dns_search + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_search: + - example.com + - example.org + register: dns_search_1 + ignore_errors: yes + +- name: dns_search (idempotency) + docker_swarm_service: + 
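+    # Note the asymmetry these tests encode: reordering dns servers or
+    # dns_search domains counts as a change, because resolution order
+    # matters, while dns_options are compared as an unordered set.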
name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_search: + - example.com + - example.org + register: dns_search_2 + ignore_errors: yes + +- name: dns_search (different order) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_search: + - example.org + - example.com + register: dns_search_3 + ignore_errors: yes + +- name: dns_search (changed elements) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_search: + - ansible.com + - example.com + register: dns_search_4 + ignore_errors: yes + +- name: dns_search (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_search: [] + register: dns_search_5 + ignore_errors: yes + +- name: dns_search (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + dns_search: [] + register: dns_search_6 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - dns_search_1 is changed + - dns_search_2 is not changed + - dns_search_3 is changed + - dns_search_4 is changed + - dns_search_5 is changed + - dns_search_6 is not changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - dns_search_1 is failed + - "'Minimum version required' in dns_search_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## endpoint_mode ################################################### +#################################################################### + +- name: endpoint_mode + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + endpoint_mode: "dnsrr" + register: endpoint_mode_1 + ignore_errors: yes + +- name: endpoint_mode (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + endpoint_mode: "dnsrr" + register: endpoint_mode_2 + ignore_errors: yes + +- name: endpoint_mode (changes) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + endpoint_mode: "vip" + register: endpoint_mode_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - endpoint_mode_1 is changed + - endpoint_mode_2 is not changed + - endpoint_mode_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('3.0.0', '>=') +- assert: + that: + - endpoint_mode_1 is failed + - "'Minimum version required' in endpoint_mode_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('3.0.0', '<') + +#################################################################### +## env ############################################################# 
+#################################################################### + +- name: env + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + env: + - "TEST1=val1" + - "TEST2=val2" + register: env_1 + +- name: env (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + env: + TEST1: val1 + TEST2: val2 + register: env_2 + +- name: env (changes) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + env: + - "TEST1=val1" + - "TEST2=val3" + register: env_3 + +- name: env (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + env: + - "TEST2=val3" + - "TEST1=val1" + register: env_4 + +- name: env (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + env: [] + register: env_5 + +- name: env (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + env: [] + register: env_6 + +- name: env (fail unwrapped values) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env: + TEST1: true + register: env_7 + ignore_errors: yes + +- name: env (fail invalid formatted string) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env: + - "TEST1=val3" + - "TEST2" + register: env_8 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - env_1 is changed + - env_2 is not changed + - env_3 is changed + - env_4 is not changed + - env_5 is changed + - env_6 is not changed + - env_7 is failed + - env_8 is failed + +#################################################################### +## env_files ####################################################### +#################################################################### + +- name: env_files + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: + - "{{ role_path }}/files/env-file-1" + register: env_file_1 + +- name: env_files (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: + - "{{ role_path }}/files/env-file-1" + register: env_file_2 + +- name: env_files (more items) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: + - "{{ role_path }}/files/env-file-1" + - "{{ role_path }}/files/env-file-2" + register: env_file_3 + +- name: env_files (order) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: + - "{{ role_path }}/files/env-file-2" + - "{{ role_path }}/files/env-file-1" + register: env_file_4 + +- name: env_files (multiple idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: + - "{{ role_path 
}}/files/env-file-2" + - "{{ role_path }}/files/env-file-1" + register: env_file_5 + +- name: env_files (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: [] + register: env_file_6 + +- name: env_files (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + env_files: [] + register: env_file_7 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - env_file_1 is changed + - env_file_2 is not changed + - env_file_3 is changed + - env_file_4 is changed + - env_file_5 is not changed + - env_file_6 is changed + - env_file_7 is not changed + +################################################################### +## force_update ################################################### +################################################################### + +- name: force_update + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + args: + - sleep + - "3600" + force_update: yes + register: force_update_1 + ignore_errors: yes + +- name: force_update (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + args: + - sleep + - "3600" + force_update: yes + register: force_update_2 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - force_update_1 is changed + - force_update_2 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=') +- assert: + that: + - force_update_1 is failed + - "'Minimum version required' in force_update_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.1.0', '<') + +#################################################################### +## groups ########################################################## +#################################################################### + +- name: groups + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + groups: + - "1234" + - "5678" + register: groups_1 + ignore_errors: yes + +- name: groups (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + groups: + - "1234" + - "5678" + register: groups_2 + ignore_errors: yes + +- name: groups (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + groups: + - "5678" + - "1234" + register: groups_3 + ignore_errors: yes + +- name: groups (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + groups: + - "1234" + register: groups_4 + ignore_errors: yes + +- name: groups (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + groups: [] + register: groups_5 + ignore_errors: yes + +- name: groups (empty idempotency) + docker_swarm_service: + 
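+    # groups takes supplementary group IDs as strings and is compared as
+    # an unordered set, which is why swapping "1234" and "5678" above is
+    # idempotent while dropping one of them is a change.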
name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + groups: [] + register: groups_6 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - groups_1 is changed + - groups_2 is not changed + - groups_3 is not changed + - groups_4 is changed + - groups_5 is changed + - groups_6 is not changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - groups_1 is failed + - "'Minimum version required' in groups_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## healthcheck ##################################################### +#################################################################### + +- name: healthcheck + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: + - CMD + - sleep + - "1" + timeout: 2s + interval: 0h0m2s3ms4us + retries: 2 + start_period: 20s + register: healthcheck_1 + ignore_errors: yes + +- name: healthcheck (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: + - CMD + - sleep + - 1 + timeout: 2s + interval: 0h0m2s3ms4us + retries: 2 + start_period: 20s + register: healthcheck_2 + ignore_errors: yes + +- name: healthcheck (changed) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: + - CMD + - sleep + - "1" + timeout: 3s + interval: 0h1m2s3ms4us + retries: 3 + register: healthcheck_3 + ignore_errors: yes + +- name: healthcheck (disabled) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: + - NONE + register: healthcheck_4 + ignore_errors: yes + +- name: healthcheck (disabled, idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: + - NONE + register: healthcheck_5 + ignore_errors: yes + +- name: healthcheck (string in healthcheck test, changed) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: "sleep 1" + register: healthcheck_6 + ignore_errors: yes + +- name: healthcheck (string in healthcheck test, idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: + test: "sleep 1" + register: healthcheck_7 + ignore_errors: yes + +- name: healthcheck (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + healthcheck: {} + register: healthcheck_8 + ignore_errors: yes + +- name: healthcheck (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + 
command: '/bin/sh -v -c "sleep 10m"' + healthcheck: {} + register: healthcheck_9 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - healthcheck_1 is changed + - healthcheck_2 is not changed + - healthcheck_3 is changed + - healthcheck_4 is changed + - healthcheck_5 is not changed + - healthcheck_6 is changed + - healthcheck_7 is not changed + - healthcheck_8 is changed + - healthcheck_9 is not changed + when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - healthcheck_1 is failed + - "'Minimum version required' in healthcheck_1.msg" + when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.6.0', '<') + +################################################################### +## hostname ####################################################### +################################################################### + +- name: hostname + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + hostname: me.example.com + register: hostname_1 + ignore_errors: yes + +- name: hostname (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + hostname: me.example.com + register: hostname_2 + ignore_errors: yes + +- name: hostname (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + hostname: me.example.org + register: hostname_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - hostname_1 is changed + - hostname_2 is not changed + - hostname_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.2.0', '>=') +- assert: + that: + - hostname_1 is failed + - "'Minimum version required' in hostname_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.2.0', '<') + +################################################################### +## hosts ########################################################## +################################################################### + +- name: hosts + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + hosts: + example.com: 1.2.3.4 + example.org: 4.3.2.1 + register: hosts_1 + ignore_errors: yes + +- name: hosts (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + hosts: + example.com: 1.2.3.4 + example.org: 4.3.2.1 + register: hosts_2 + ignore_errors: yes + +- name: hosts (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + hosts: + example.com: 1.2.3.4 + register: hosts_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - hosts_1 is changed + - hosts_2 is not changed + - hosts_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', 
'>=') +- assert: + that: + - hosts_1 is failed + - "'Minimum version required' in hosts_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<') + + +################################################################### +## image ########################################################## +################################################################### + +- name: image + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + register: image_1 + +- name: image (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + register: image_2 + +- name: image (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine_different }}" + register: image_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - image_1 is changed + - image_2 is not changed + - image_3 is changed + +#################################################################### +## labels ########################################################## +#################################################################### + +- name: labels + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + labels: + test_1: "1" + test_2: "2" + register: labels_1 + +- name: labels (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + labels: + test_1: "1" + test_2: "2" + register: labels_2 + +- name: labels (changes) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + labels: + test_1: "1" + test_2: "2" + test_3: "3" + register: labels_3 + +- name: labels (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + labels: {} + register: labels_4 + +- name: labels (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + labels: {} + register: labels_5 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - labels_1 is changed + - labels_2 is not changed + - labels_3 is changed + - labels_4 is changed + - labels_5 is not changed + +################################################################### +## mode ########################################################### +################################################################### + +- name: mode + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mode: "replicated" + replicas: 1 + register: mode_1 + +- name: mode (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mode: "replicated" + replicas: 1 + register: mode_2 + +- name: mode (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ 
docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + mode: "global" + replicas: 1 + register: mode_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - mode_1 is changed + - mode_2 is not changed + - mode_3 is changed + +#################################################################### +## stop_grace_period ############################################### +#################################################################### + +- name: stop_grace_period + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + stop_grace_period: 60s + register: stop_grace_period_1 + +- name: stop_grace_period (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + stop_grace_period: 60s + register: stop_grace_period_2 + +- name: stop_grace_period (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + stop_grace_period: 1m30s + register: stop_grace_period_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - stop_grace_period_1 is changed + - stop_grace_period_2 is not changed + - stop_grace_period_3 is changed + +#################################################################### +## stop_signal ##################################################### +#################################################################### + +- name: stop_signal + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + stop_signal: "30" + register: stop_signal_1 + ignore_errors: yes + +- name: stop_signal (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + stop_signal: "30" + register: stop_signal_2 + ignore_errors: yes + +- name: stop_signal (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + stop_signal: "9" + register: stop_signal_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - stop_signal_1 is changed + - stop_signal_2 is not changed + - stop_signal_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - stop_signal_1 is failed + - "'Minimum version required' in stop_signal_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('2.6.0', '<') + +#################################################################### +## publish ######################################################### +#################################################################### + +- name: publish + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: + - protocol: tcp + published_port: 60001 + target_port: 60001 + - protocol: udp + published_port: 60002 + target_port: 60002 + register: publish_1 + ignore_errors: yes 
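+# Note: the idempotency task below deliberately lists the same ports in a different order and omits "protocol: tcp" (the default) on one entry. +# It is asserted as "not changed" further down, so the module's comparison is expected to be order-insensitive and to apply protocol defaults.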
+ +- name: publish (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: + - protocol: udp + published_port: 60002 + target_port: 60002 + - published_port: 60001 + target_port: 60001 + register: publish_2 + ignore_errors: yes + +- name: publish (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: + - protocol: tcp + published_port: 60002 + target_port: 60003 + - protocol: udp + published_port: 60001 + target_port: 60001 + register: publish_3 + ignore_errors: yes + +- name: publish (mode) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: + - protocol: tcp + published_port: 60002 + target_port: 60003 + mode: host + - protocol: udp + published_port: 60001 + target_port: 60001 + mode: host + register: publish_4 + ignore_errors: yes + +- name: publish (mode idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: + - protocol: udp + published_port: 60001 + target_port: 60001 + mode: host + - protocol: tcp + published_port: 60002 + target_port: 60003 + mode: host + register: publish_5 + ignore_errors: yes + +- name: publish (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: [] + register: publish_6 + ignore_errors: yes + +- name: publish (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: [] + register: publish_7 + ignore_errors: yes + +- name: publish (publishes the same port with both protocols) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + publish: + - protocol: udp + published_port: 60001 + target_port: 60001 + mode: host + - protocol: tcp + published_port: 60001 + target_port: 60001 + mode: host + register: publish_8 + ignore_errors: yes +- name: gather service info + docker_swarm_service_info: + name: "{{ service_name }}" + register: publish_8_info + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - publish_1 is changed + - publish_2 is not changed + - publish_3 is changed + - publish_4 is changed + - publish_5 is not changed + - publish_6 is changed + - publish_7 is not changed + - publish_8 is changed + - (publish_8_info.service.Endpoint.Ports | length) == 2 + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('3.0.0', '>=') +- assert: + that: + - publish_1 is failed + - "'Minimum version required' in publish_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('3.0.0', '<') + +################################################################### +## read_only ###################################################### +################################################################### + +- name: read_only + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + 
command: '/bin/sh -v -c "sleep 10m"' + read_only: true + register: read_only_1 + ignore_errors: yes + +- name: read_only (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + read_only: true + register: read_only_2 + ignore_errors: yes + +- name: read_only (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + read_only: false + register: read_only_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - read_only_1 is changed + - read_only_2 is not changed + - read_only_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('2.6.0', '>=') +- assert: + that: + - read_only_1 is failed + - "'Minimum version required' in read_only_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('2.6.0', '<') + +################################################################### +## replicas ####################################################### +################################################################### + +- name: replicas + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + replicas: 2 + register: replicas_1 + +- name: replicas (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + replicas: 2 + register: replicas_2 + +- name: replicas (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + replicas: 3 + register: replicas_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - replicas_1 is changed + - replicas_2 is not changed + - replicas_3 is changed + +################################################################### +# resolve_image ################################################### +################################################################### + +- name: resolve_image (false) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -v -c "sleep 10m"' + resolve_image: false + register: resolve_image_1 + +- name: resolve_image (false idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -v -c "sleep 10m"' + resolve_image: false + register: resolve_image_2 + +- name: resolve_image (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -v -c "sleep 10m"' + resolve_image: true + register: resolve_image_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - resolve_image_1 is changed + - resolve_image_2 is not changed + - resolve_image_3 is changed + when: docker_api_version is version('1.30', '>=') and docker_py_version is version('3.2.0', '>=') +- assert: + that: + - resolve_image_1 is changed + - resolve_image_2 is not changed + - resolve_image_3 is failed + - "('version is ' ~ docker_py_version ~ ' ') in 
resolve_image_3.msg" + - "'Minimum version required is 3.2.0 ' in resolve_image_3.msg" + when: docker_api_version is version('1.30', '<') or docker_py_version is version('3.2.0', '<') + +################################################################### +# tty ############################################################# +################################################################### + +- name: tty + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + tty: yes + register: tty_1 + ignore_errors: yes + +- name: tty (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + tty: yes + register: tty_2 + ignore_errors: yes + +- name: tty (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + tty: no + register: tty_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - tty_1 is changed + - tty_2 is not changed + - tty_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=') +- assert: + that: + - tty_1 is failed + - "'Minimum version required' in tty_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<') + +################################################################### +## user ########################################################### +################################################################### + +- name: user + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + user: "operator" + register: user_1 + +- name: user (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + user: "operator" + register: user_2 + +- name: user (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + user: "root" + register: user_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - user_1 is changed + - user_2 is not changed + - user_3 is changed + +#################################################################### +## working_dir ##################################################### +#################################################################### + +- name: working_dir + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + working_dir: /tmp + register: working_dir_1 + +- name: working_dir (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + working_dir: /tmp + register: working_dir_2 + +- name: working_dir (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + working_dir: / + register: working_dir_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - working_dir_1 is changed + - working_dir_2 is not changed + - 
working_dir_3 is changed + +#################################################################### +## init ############################################################ +#################################################################### + +- name: init + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + init: true + register: init_1 + +- name: init (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + init: true + register: init_2 + +- name: init (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + init: false + register: init_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - init_1 is changed + - init_2 is not changed + - init_3 is changed + when: docker_api_version is version('1.37', '>=') + +- assert: + that: + - init_1 is failed + - "('version is ' ~ docker_api_version ~ '. Minimum version required is 1.37') in init_1.msg" + when: docker_api_version is version('1.37', '<') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml new file mode 100644 index 00000000..71873b20 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml @@ -0,0 +1,202 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-placement' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + + +#################################################################### +## placement.preferences ########################################### +#################################################################### + +- name: placement.preferences + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + preferences: + - spread: "node.labels.test" + register: placement_preferences_1 + ignore_errors: yes + +- name: placement.preferences (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + preferences: + - spread: "node.labels.test" + register: placement_preferences_2 + ignore_errors: yes + +- name: placement.preferences (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + preferences: + - spread: "node.labels.test2" + register: placement_preferences_3 + ignore_errors: yes + +- name: placement.preferences (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + preferences: [] + register: placement_preferences_4 + ignore_errors: yes + +- name: placement.preferences (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + 
preferences: [] + register: placement_preferences_5 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - placement_preferences_1 is changed + - placement_preferences_2 is not changed + - placement_preferences_3 is changed + - placement_preferences_4 is changed + - placement_preferences_5 is not changed + when: docker_api_version is version('1.27', '>=') and docker_py_version is version('2.4.0', '>=') +- assert: + that: + - placement_preferences_1 is failed + - "'Minimum version required' in placement_preferences_1.msg" + when: docker_api_version is version('1.27', '<') or docker_py_version is version('2.4.0', '<') + +#################################################################### +## placement.constraints ##################################################### +#################################################################### + +- name: placement.constraints + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: + - "node.role == manager" + register: constraints_1 + ignore_errors: yes + +- name: placement.constraints (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: + - "node.role == manager" + register: constraints_2 + ignore_errors: yes + +- name: placement.constraints (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: + - "node.role == worker" + register: constraints_3 + ignore_errors: yes + +- name: placement.constraints (add) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: + - "node.role == worker" + - "node.label != non_existent_label" + register: constraints_4 + ignore_errors: yes + +- name: placement.constraints (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: + - "node.label != non_existent_label" + - "node.role == worker" + register: constraints_5 + ignore_errors: yes + +- name: placement.constraints (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: [] + register: constraints_6 + ignore_errors: yes + +- name: placement.constraints (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + placement: + constraints: [] + register: constraints_7 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - constraints_1 is changed + - constraints_2 is not changed + - constraints_3 is changed + - constraints_4 is changed + - constraints_5 is not changed + - constraints_6 is changed + - constraints_7 is not changed + when: docker_api_version is version('1.27', '>=') and docker_py_version is version('2.4.0', '>=') +- assert: + that: + - constraints_1 is failed + - 
"'Minimum version required' in constraints_1.msg" + when: docker_api_version is version('1.27', '<') or docker_py_version is version('2.4.0', '<') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml new file mode 100644 index 00000000..8a481067 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml @@ -0,0 +1,193 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-resources' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + +#################################################################### +## limits.cpus ##################################################### +#################################################################### + +- name: limits.cpus + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + limits: + cpus: 1 + register: limit_cpu_1 + +- name: limits.cpus (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + limits: + cpus: 1 + register: limit_cpu_2 + +- name: limits.cpus (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + limits: + cpus: 0.5 + register: limit_cpu_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - limit_cpu_1 is changed + - limit_cpu_2 is not changed + - limit_cpu_3 is changed + +################################################################### +## limits.memory ################################################## +################################################################### + +- name: limits.memory + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + limits: + memory: 64M + register: limit_memory_1 + +- name: limits.memory (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + limits: + memory: 64M + register: limit_memory_2 + +- name: limits.memory (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + limits: + memory: 32M + register: limit_memory_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - limit_memory_1 is changed + - limit_memory_2 is not changed + - limit_memory_3 is changed + +################################################################### +## reservations.cpus ############################################## +################################################################### + +- name: reserve_cpu + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + reservations: + cpus: 1 + register: reserve_cpu_1 + +- name: 
reserve_cpu (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + reservations: + cpus: 1 + register: reserve_cpu_2 + +- name: reserve_cpu (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + reservations: + cpus: 0.5 + register: reserve_cpu_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - reserve_cpu_1 is changed + - reserve_cpu_2 is not changed + - reserve_cpu_3 is changed + +################################################################### +## reservations.memory ############################################ +################################################################### + +- name: reservations.memory + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + reservations: + memory: 64M + register: reserve_memory_1 + +- name: reservations.memory (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + reservations: + memory: 64M + register: reserve_memory_2 + +- name: reservations.memory (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + reservations: + memory: 32M + register: reserve_memory_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - reserve_memory_1 is changed + - reserve_memory_2 is not changed + - reserve_memory_3 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml new file mode 100644 index 00000000..cf7e1668 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml @@ -0,0 +1,193 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-restart_config' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + +################################################################### +## restart_config.condition ####################################### +################################################################### + +- name: restart_config.condition + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + condition: "on-failure" + register: restart_policy_1 + +- name: restart_config.condition (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + condition: "on-failure" + register: restart_policy_2 + +- name: restart_config.condition (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + 
restart_config: + condition: "any" + register: restart_policy_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - restart_policy_1 is changed + - restart_policy_2 is not changed + - restart_policy_3 is changed + +################################################################### +## restart_config.max_attempts #################################### +################################################################### + +- name: restart_config.max_attempts + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + max_attempts: 1 + register: restart_policy_attempts_1 + +- name: restart_config.max_attempts (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + max_attempts: 1 + register: restart_policy_attempts_2 + +- name: restart_config.max_attempts (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + max_attempts: 2 + register: restart_policy_attempts_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - restart_policy_attempts_1 is changed + - restart_policy_attempts_2 is not changed + - restart_policy_attempts_3 is changed + +################################################################### +## restart_config.delay ########################################### +################################################################### + +- name: restart_config.delay + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + delay: 5s + register: restart_policy_delay_1 + +- name: restart_config.delay (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + delay: 5s + register: restart_policy_delay_2 + +- name: restart_config.delay (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + delay: 10s + register: restart_policy_delay_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - restart_policy_delay_1 is changed + - restart_policy_delay_2 is not changed + - restart_policy_delay_3 is changed + +################################################################### +## restart_config.window ########################################## +################################################################### + +- name: restart_config.window + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + window: 10s + register: restart_policy_window_1 + +- name: restart_config.window (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + window: 10s + register: restart_policy_window_2 + +- name: restart_config.window 
(change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + restart_config: + window: 20s + register: restart_policy_window_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - restart_policy_window_1 is changed + - restart_policy_window_2 is not changed + - restart_policy_window_3 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml new file mode 100644 index 00000000..8d97d7e9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml @@ -0,0 +1,339 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-rollback_config' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + +################################################################### +## rollback_config.delay ############################################ +################################################################### + +- name: rollback_config.delay + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + delay: 5s + register: rollback_config_delay_1 + ignore_errors: yes + +- name: rollback_config.delay (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + delay: 5s + register: rollback_config_delay_2 + ignore_errors: yes + +- name: rollback_config.delay (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + delay: 12s + register: rollback_config_delay_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - rollback_config_delay_1 is changed + - rollback_config_delay_2 is not changed + - rollback_config_delay_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=') +- assert: + that: + - rollback_config_delay_1 is failed + - "'Minimum version required' in rollback_config_delay_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<') + +################################################################### +## rollback_config.failure_action ################################### +################################################################### + +- name: rollback_config.failure_action + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + failure_action: "pause" + register: rollback_config_failure_action_1 + ignore_errors: yes + +- name: rollback_config.failure_action (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 
10m"' + rollback_config: + failure_action: "pause" + register: rollback_config_failure_action_2 + ignore_errors: yes + +- name: rollback_config.failure_action (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + failure_action: "continue" + register: rollback_config_failure_action_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - rollback_config_failure_action_1 is changed + - rollback_config_failure_action_2 is not changed + - rollback_config_failure_action_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=') +- assert: + that: + - rollback_config_failure_action_1 is failed + - "'Minimum version required' in rollback_config_failure_action_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<') + +################################################################### +## rollback_config.max_failure_ratio ################################ +################################################################### + +- name: rollback_config.max_failure_ratio + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + max_failure_ratio: 0.25 + register: rollback_config_max_failure_ratio_1 + ignore_errors: yes + +- name: rollback_config.max_failure_ratio (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + max_failure_ratio: 0.25 + register: rollback_config_max_failure_ratio_2 + ignore_errors: yes + +- name: rollback_config.max_failure_ratio (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + max_failure_ratio: 0.50 + register: rollback_config_max_failure_ratio_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - rollback_config_max_failure_ratio_1 is changed + - rollback_config_max_failure_ratio_2 is not changed + - rollback_config_max_failure_ratio_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=') +- assert: + that: + - rollback_config_max_failure_ratio_1 is failed + - "'Minimum version required' in rollback_config_max_failure_ratio_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<') + +################################################################### +# rollback_config.monitor ########################################### +################################################################### + +- name: rollback_config.monitor + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + monitor: 10s + register: rollback_config_monitor_1 + ignore_errors: yes + +- name: rollback_config.monitor (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + 
monitor: 10s + register: rollback_config_monitor_2 + ignore_errors: yes + +- name: rollback_config.monitor (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + monitor: 60s + register: rollback_config_monitor_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - rollback_config_monitor_1 is changed + - rollback_config_monitor_2 is not changed + - rollback_config_monitor_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=') +- assert: + that: + - rollback_config_monitor_1 is failed + - "'Minimum version required' in rollback_config_monitor_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<') + +################################################################### +# rollback_config.order ############################################# +################################################################### + +- name: rollback_config.order + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + order: "start-first" + register: rollback_config_order_1 + ignore_errors: yes + +- name: rollback_config.order (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + order: "start-first" + register: rollback_config_order_2 + ignore_errors: yes + +- name: rollback_config.order (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + order: "stop-first" + register: rollback_config_order_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - rollback_config_order_1 is changed + - rollback_config_order_2 is not changed + - rollback_config_order_3 is changed + when: docker_api_version is version('1.29', '>=') and docker_py_version is version('3.5.0', '>=') +- assert: + that: + - rollback_config_order_1 is failed + - "'Minimum version required' in rollback_config_order_1.msg" + when: docker_api_version is version('1.29', '<') or docker_py_version is version('3.5.0', '<') + +################################################################### +## rollback_config.parallelism ###################################### +################################################################### + +- name: rollback_config.parallelism + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + parallelism: 2 + register: rollback_config_parallelism_1 + ignore_errors: yes + +- name: rollback_config.parallelism (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + parallelism: 2 + register: rollback_config_parallelism_2 + ignore_errors: yes + +- name: rollback_config.parallelism (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no 
+ command: '/bin/sh -v -c "sleep 10m"' + rollback_config: + parallelism: 1 + register: rollback_config_parallelism_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - rollback_config_parallelism_1 is changed + - rollback_config_parallelism_2 is not changed + - rollback_config_parallelism_3 is changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=') +- assert: + that: + - rollback_config_parallelism_1 is failed + - "'Minimum version required' in rollback_config_parallelism_1.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml new file mode 100644 index 00000000..bcd1f269 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml @@ -0,0 +1,411 @@ +--- + +- name: Registering service and secret names + set_fact: + service_name: "{{ name_prefix ~ '-secrets' }}" + secret_name_1: "{{ name_prefix ~ '-secret-1' }}" + secret_name_2: "{{ name_prefix ~ '-secret-2' }}" + +- name: Registering secret names + set_fact: + secret_names: "{{ secret_names + [secret_name_1, secret_name_2] }}" + +- docker_secret: + name: "{{ secret_name_1 }}" + data: "secret1" + state: "present" + register: "secret_result_1" + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=') + +- docker_secret: + name: "{{ secret_name_2 }}" + data: "secret2" + state: "present" + register: "secret_result_2" + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=') + +#################################################################### +## secrets ######################################################### +#################################################################### + +- name: secrets + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + filename: "/run/secrets/{{ secret_name_1 }}.txt" + register: secrets_1 + ignore_errors: yes + +- name: secrets (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_name: "{{ secret_name_1 }}" + filename: "/run/secrets/{{ secret_name_1 }}.txt" + register: secrets_2 + ignore_errors: yes + +- name: secrets (add) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + filename: "/run/secrets/{{ secret_name_1 }}.txt" + - secret_name: "{{ secret_name_2 }}" + filename: "/run/secrets/{{ secret_name_2 }}.txt" + register: secrets_3 + ignore_errors: yes + +- name: secrets (add idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v 
-c "sleep 10m"' + secrets: + - secret_name: "{{ secret_name_1 }}" + filename: "/run/secrets/{{ secret_name_1 }}.txt" + - secret_id: "{{ secret_result_2.secret_id|default('') }}" + secret_name: "{{ secret_name_2 }}" + filename: "/run/secrets/{{ secret_name_2 }}.txt" + register: secrets_4 + ignore_errors: yes + +- name: secrets (add idempotency no id) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_name: "{{ secret_name_1 }}" + filename: "/run/secrets/{{ secret_name_1 }}.txt" + - secret_name: "{{ secret_name_2 }}" + filename: "/run/secrets/{{ secret_name_2 }}.txt" + register: secrets_5 + ignore_errors: yes + +- name: secrets (order idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_name: "{{ secret_name_2 }}" + filename: "/run/secrets/{{ secret_name_2 }}.txt" + - secret_name: "{{ secret_name_1 }}" + filename: "/run/secrets/{{ secret_name_1 }}.txt" + register: secrets_6 + ignore_errors: yes + +- name: secrets (empty) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: [] + register: secrets_7 + ignore_errors: yes + +- name: secrets (empty idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: [] + register: secrets_8 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - secrets_1 is changed + - secrets_2 is not changed + - secrets_3 is changed + - secrets_4 is not changed + - secrets_5 is not changed + - secrets_6 is not changed + - secrets_7 is changed + - secrets_8 is not changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=') +- assert: + that: + - secrets_1 is failed + - "'Minimum version required' in secrets_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<') + +#################################################################### +## secrets (uid) ################################################### +#################################################################### + +- name: secrets (uid int) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + uid: 1000 + register: secrets_1 + ignore_errors: yes + +- name: secrets (uid int idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + uid: 1000 + register: secrets_2 + ignore_errors: yes + +- name: secrets (uid int change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + uid: 1002 + register: secrets_3 + ignore_errors: 
yes + +- name: secrets (uid str) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + uid: "1001" + register: secrets_4 + ignore_errors: yes + +- name: secrets (uid str idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + uid: "1001" + register: secrets_5 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - secrets_1 is changed + - secrets_2 is not changed + - secrets_3 is changed + - secrets_4 is changed + - secrets_5 is not changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=') +- assert: + that: + - secrets_1 is failed + - "'Minimum version required' in secrets_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<') + +#################################################################### +## secrets (gid) ################################################### +#################################################################### + +- name: secrets (gid int) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + gid: 1001 + register: secrets_1 + ignore_errors: yes + +- name: secrets (gid int idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + gid: 1001 + register: secrets_2 + ignore_errors: yes + +- name: secrets (gid int change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + gid: 1002 + register: secrets_3 + ignore_errors: yes + +- name: secrets (gid str) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + gid: "1003" + register: secrets_4 + ignore_errors: yes + +- name: secrets (gid str idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + gid: "1003" + register: secrets_5 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - secrets_1 is changed + - secrets_2 is not changed + - secrets_3 is changed + - secrets_4 is changed + - secrets_5 is not changed + when: docker_api_version is version('1.25', '>=') and 
docker_py_version is version('2.4.0', '>=') +- assert: + that: + - secrets_1 is failed + - "'Minimum version required' in secrets_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<') + +#################################################################### +## secrets (mode) ################################################## +#################################################################### + +- name: secrets (mode) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + mode: 0600 + register: secrets_1 + ignore_errors: yes + +- name: secrets (mode idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + mode: 0600 + register: secrets_2 + ignore_errors: yes + +- name: secrets (mode change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + secrets: + - secret_id: "{{ secret_result_1.secret_id|default('') }}" + secret_name: "{{ secret_name_1 }}" + mode: 0777 + register: secrets_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - secrets_1 is changed + - secrets_2 is not changed + - secrets_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=') +- assert: + that: + - secrets_1 is failed + - "'Minimum version required' in secrets_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<') + +#################################################################### +#################################################################### +#################################################################### + +- name: Delete secrets + docker_secret: + name: "{{ secret_name }}" + state: absent + force: yes + loop: + - "{{ secret_name_1 }}" + - "{{ secret_name_2 }}" + loop_control: + loop_var: secret_name + ignore_errors: yes + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml new file mode 100644 index 00000000..5eccde28 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml @@ -0,0 +1,345 @@ +--- + +- name: Registering service name + set_fact: + service_name: "{{ name_prefix ~ '-update_config' }}" + +- name: Registering service name + set_fact: + service_names: "{{ service_names + [service_name] }}" + +################################################################### +## update_config.delay ############################################ +################################################################### + +- name: update_config.delay + docker_swarm_service: + name: "{{ service_name }}" + image: 
"{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + delay: 5s + register: update_delay_1 + +- name: update_config.delay (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + delay: 5s + register: update_delay_2 + +- name: update_config.delay (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + delay: 12s + register: update_delay_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - update_delay_1 is changed + - update_delay_2 is not changed + - update_delay_3 is changed + +################################################################### +## update_config.failure_action ################################### +################################################################### + +- name: update_config.failure_action + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + failure_action: "pause" + register: update_failure_action_1 + +- name: update_config.failure_action (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + failure_action: "pause" + register: update_failure_action_2 + +- name: update_config.failure_action (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + failure_action: "continue" + register: update_failure_action_3 + +- name: update_config.failure_action (rollback) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + failure_action: "rollback" + register: update_failure_action_4 + ignore_errors: yes + +- name: update_config.failure_action (rollback idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_failure_action: "rollback" + register: update_failure_action_5 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - update_failure_action_1 is changed + - update_failure_action_2 is not changed + - update_failure_action_3 is changed + +- assert: + that: + - update_failure_action_4 is changed + - update_failure_action_5 is not changed + when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=') + +- assert: + that: + - update_failure_action_4 is failed + - "'Minimum version required' in update_failure_action_4.msg" + when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<') + +################################################################### +## update_config.max_failure_ratio ################################ +################################################################### + +- name: update_config.max_failure_ratio + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + 
resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + max_failure_ratio: 0.25 + register: update_max_failure_ratio_1 + ignore_errors: yes + +- name: update_config.max_failure_ratio (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + max_failure_ratio: 0.25 + register: update_max_failure_ratio_2 + ignore_errors: yes + +- name: update_config.max_failure_ratio (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + max_failure_ratio: 0.50 + register: update_max_failure_ratio_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - update_max_failure_ratio_1 is changed + - update_max_failure_ratio_2 is not changed + - update_max_failure_ratio_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=') +- assert: + that: + - update_max_failure_ratio_1 is failed + - "'Minimum version required' in update_max_failure_ratio_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.1.0', '<') + +################################################################### +# update_config.monitor ########################################### +################################################################### + +- name: update_config.monitor + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + monitor: 10s + register: update_monitor_1 + ignore_errors: yes + +- name: update_config.monitor (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + monitor: 10s + register: update_monitor_2 + ignore_errors: yes + +- name: update_config.monitor (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + monitor: 60s + register: update_monitor_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - update_monitor_1 is changed + - update_monitor_2 is not changed + - update_monitor_3 is changed + when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=') +- assert: + that: + - update_monitor_1 is failed + - "'Minimum version required' in update_monitor_1.msg" + when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.1.0', '<') + +################################################################### +# update_config.order ############################################# +################################################################### + +- name: update_config.order + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + order: "start-first" + register: update_order_1 + ignore_errors: yes + +- name: update_config.order (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + 
resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + order: "start-first" + register: update_order_2 + ignore_errors: yes + +- name: update_config.order (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + order: "stop-first" + register: update_order_3 + ignore_errors: yes + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - update_order_1 is changed + - update_order_2 is not changed + - update_order_3 is changed + when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.7.0', '>=') +- assert: + that: + - update_order_1 is failed + - "'Minimum version required' in update_order_1.msg" + when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.7.0', '<') + +################################################################### +## update_config.parallelism ###################################### +################################################################### + +- name: update_config.parallelism + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + parallelism: 2 + register: update_parallelism_1 + +- name: update_config.parallelism (idempotency) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + parallelism: 2 + register: update_parallelism_2 + +- name: update_config.parallelism (change) + docker_swarm_service: + name: "{{ service_name }}" + image: "{{ docker_test_image_alpine }}" + resolve_image: no + command: '/bin/sh -v -c "sleep 10m"' + update_config: + parallelism: 1 + register: update_parallelism_3 + +- name: cleanup + docker_swarm_service: + name: "{{ service_name }}" + state: absent + diff: no + +- assert: + that: + - update_parallelism_1 is changed + - update_parallelism_2 is not changed + - update_parallelism_3 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml new file mode 100644 index 00000000..8ec7ffeb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml @@ -0,0 +1,54 @@ +--- + +service_expected_output: + args: [sleep, '1800'] + configs: null + constraints: null + container_labels: null + command: null + dns: null + dns_options: null + dns_search: null + endpoint_mode: vip + env: null + force_update: null + groups: null + healthcheck: null + healthcheck_disabled: null + hostname: null + hosts: null + image: "{{ docker_test_image_busybox }}" + labels: null + limit_cpu: null + limit_memory: null + log_driver: null + log_driver_options: null + mode: global + mounts: null + networks: null + secrets: null + stop_grace_period: null + stop_signal: null + placement_preferences: null + publish: + - {mode: null, protocol: tcp, published_port: 60001, target_port: 60001} + - {mode: null, protocol: udp, published_port: 60001, target_port: 60001} + read_only: null + replicas: null + reserve_cpu: null + reserve_memory: null + restart_policy: null + restart_policy_attempts: 
null + restart_policy_delay: null + restart_policy_window: null + rollback_config: null + tty: null + update_delay: null + update_failure_action: null + update_max_failure_ratio: null + update_monitor: null + update_order: null + update_parallelism: null + user: null + working_dir: null + init: null diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases new file mode 100644 index 00000000..cdf1b9b1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml new file mode 100644 index 00000000..8350e901 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml @@ -0,0 +1,11 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: test_docker_swarm_service_info.yml + when: docker_py_version is version('2.0.0', '>=') and docker_api_version is version('1.24', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_swarm_service_info tests!" 
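+ # Note: the condition below deliberately exempts CentOS 6 hosts, where only
+ # very old docker/docker-py builds are available, so this target is skipped
+ # there instead of being reported as a failure.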
+ when: not(docker_py_version is version('2.0.0', '>=') and docker_api_version is version('1.24', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml
new file mode 100644
index 00000000..2b5b882c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml
@@ -0,0 +1,83 @@
+---
+
+- name: Generate service base name
+ set_fact:
+ service_base_name: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ service_base_name ~ '-1' }}"
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_swarm_service_info when docker is not running in swarm mode
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when swarm is not in use or the module is not run on a manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: Create services
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+
+ - name: Try to get docker_swarm_service_info for a single service
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: output
+
+ - name: assert that service info can be read
+ assert:
+ that:
+ - 'output.exists == true'
+ - 'output.service.ID is string'
+ - 'output.service.Spec.Name == service_name'
+
+ - name: Create random name
+ set_fact:
+ random_service_name: "{{ 'random-service-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_swarm_service_info using random service name as parameter
+ docker_swarm_service_info:
+ name: "{{ random_service_name }}"
+ register: output
+
+ - name: assert that a non-existing service returns no info
+ assert:
+ that:
+ - 'output.service is none'
+ - 'output.exists == false'
+
+ always:
+ - name: Remove services
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Remove swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ # Maximum of 1.24 (docker API version for docker_swarm_service_info) and 1.25 (docker API version for docker_swarm) is 1.25
+ when: docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service_info tests!" 
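+ # A minimal standalone usage sketch (hypothetical service name), matching the
+ # exists/service return contract asserted in the tasks above:
+ #
+ # - community.docker.docker_swarm_service_info:
+ #     name: my-service
+ #   register: info
+ # - debug:
+ #     msg: "Found {{ info.service.Spec.Name }}"
+ #   when: info.exists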
+ when: not(docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml new file mode 100644 index 00000000..04baaadb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml @@ -0,0 +1,30 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Create random name prefix + set_fact: + name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + vnames: [] + +- debug: + msg: "Using name prefix {{ name_prefix }}" + +- block: + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" + + always: + - name: "Make sure all volumes are removed" + docker_volume: + name: "{{ item }}" + state: absent + with_items: "{{ vnames }}" + + when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=') # FIXME: find out API version! + +- fail: msg="Too old docker / docker-py version to run docker_volume tests!" 
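+ # Note: the with_fileglob loop above discovers every file under tests/ (for
+ # example basic.yml below) and runs each one through run-test.yml, so new
+ # test files are picked up automatically.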
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml
new file mode 100644
index 00000000..7ee56261
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml
@@ -0,0 +1,177 @@
+---
+- name: Registering volume name
+ set_fact:
+ vname: "{{ name_prefix ~ '-basic' }}"
+- name: Registering volume names
+ set_fact:
+ vnames: "{{ vnames + [vname] }}"
+
+####################################################################
+## basic ###########################################################
+####################################################################
+
+- name: Create a volume
+ docker_volume:
+ name: "{{ vname }}"
+ register: create_1
+
+- name: Create a volume (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ register: create_2
+
+- name: "Create a volume (recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ recreate: options-changed
+ register: create_3
+
+- name: "Create a volume (recreate: always)"
+ docker_volume:
+ name: "{{ vname }}"
+ recreate: always
+ register: create_4
+
+- name: Remove a volume
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+ register: absent_1
+
+- name: Remove a volume (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+ register: absent_2
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - create_3 is not changed
+ - create_4 is changed
+ - absent_1 is changed
+ - absent_2 is not changed
+
+####################################################################
+## driver_options ##################################################
+####################################################################
+
+- name: Create a volume with options
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tmpfs
+ device: tmpfs
+ o: size=100m,uid=1000
+ register: driver_options_1
+
+- name: Create a volume with options (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tmpfs
+ device: tmpfs
+ o: size=100m,uid=1000
+ register: driver_options_2
+
+- name: Create a volume with options (changed)
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tmpfs
+ device: tmpfs
+ o: size=200m,uid=1000
+ register: driver_options_3
+
+- name: "Create a volume with options (changed, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tmpfs
+ device: tmpfs
+ o: size=200m,uid=1000
+ recreate: options-changed
+ register: driver_options_4
+
+- name: 
Cleanup + docker_volume: + name: "{{ vname }}" + state: absent + +- assert: + that: + - driver_options_1 is changed + - driver_options_2 is not changed + - driver_options_3 is not changed + - driver_options_4 is changed + +#################################################################### +## labels ########################################################## +#################################################################### + +- name: Create a volume with labels + docker_volume: + name: "{{ vname }}" + labels: + ansible.test.1: hello + ansible.test.2: world + register: driver_labels_1 + +- name: Create a volume with labels (idempotency) + docker_volume: + name: "{{ vname }}" + labels: + ansible.test.2: world + ansible.test.1: hello + register: driver_labels_2 + +- name: Create a volume with labels (less) + docker_volume: + name: "{{ vname }}" + labels: + ansible.test.1: hello + register: driver_labels_3 + +- name: "Create a volume with labels (less, recreate: options-changed)" + docker_volume: + name: "{{ vname }}" + labels: + ansible.test.1: hello + recreate: options-changed + register: driver_labels_4 + +- name: Create a volume with labels (more) + docker_volume: + name: "{{ vname }}" + labels: + ansible.test.1: hello + ansible.test.3: ansible + register: driver_labels_5 + +- name: "Create a volume with labels (more, recreate: options-changed)" + docker_volume: + name: "{{ vname }}" + labels: + ansible.test.1: hello + ansible.test.3: ansible + recreate: options-changed + register: driver_labels_6 + +- name: Cleanup + docker_volume: + name: "{{ vname }}" + state: absent + +- assert: + that: + - driver_labels_1 is changed + - driver_labels_2 is not changed + - driver_labels_3 is not changed + - driver_labels_4 is not changed + - driver_labels_5 is not changed + - driver_labels_6 is changed diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases new file mode 100644 index 00000000..02b78723 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +destructive diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml new file mode 100644 index 00000000..30f71e32 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml @@ -0,0 +1,74 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + 
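+# A minimal usage sketch (hypothetical volume name), based on the return
+# values exercised by the block below: `exists` is a boolean, and `volume`
+# holds the raw `docker volume inspect` data (none when the volume is absent):
+#
+# - community.docker.docker_volume_info:
+#     name: example-volume
+#   register: info
+# - debug:
+#     msg: "Mountpoint: {{ info.volume.Mountpoint }}"
+#   when: info.exists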
+- block: + - name: Create random volume name + set_fact: + cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}" + + - name: Make sure volume is not there + docker_volume: + name: "{{ cname }}" + state: absent + + - name: Inspect a non-present volume + docker_volume_info: + name: "{{ cname }}" + register: result + + - assert: + that: + - "not result.exists" + - "'volume' in result" + - "result.volume is none" + + - name: Make sure volume exists + docker_volume: + name: "{{ cname }}" + + - name: Inspect a present volume + docker_volume_info: + name: "{{ cname }}" + register: result + - name: Dump docker_volume_info result + debug: var=result + + - name: "Comparison: use 'docker volume inspect'" + command: docker volume inspect "{{ cname }}" + register: docker_volume_inspect + ignore_errors: yes + - block: + - set_fact: + docker_volume_inspect_result: "{{ docker_volume_inspect.stdout | from_json }}" + - name: Dump docker volume inspect result + debug: var=docker_volume_inspect_result + when: docker_volume_inspect is not failed + + - name: Cleanup + docker_volume: + name: "{{ cname }}" + state: absent + + - assert: + that: + - result.exists + - "'volume' in result" + - "result.volume" + + - assert: + that: + - "result.volume == docker_volume_inspect_result[0]" + when: docker_volume_inspect is not failed + - assert: + that: + - "'is too new. Maximum supported API version is' in docker_volume_inspect.stderr" + when: docker_volume_inspect is failed + + # Requirements for docker_volume + when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.24', '>=') + +- fail: msg="Too old docker / docker-py version to run docker_volume_info tests!" + when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.24', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases new file mode 100644 index 00000000..d4bad10d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases @@ -0,0 +1,3 @@ +shippable/posix/group4 +destructive +needs/root diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml new file mode 100644 index 00000000..60a5b056 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml @@ -0,0 +1,2 @@ +plugin: community.docker.docker_containers +docker_host: unix://var/run/docker.sock diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml new file mode 100644 index 00000000..ec8db12e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml @@ -0,0 +1,6 @@ +plugin: 
community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
+connection_type: ssh
+verbose_output: true
+add_legacy_groups: true
+default_ip: 1.2.3.4
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml
new file mode 100644
index 00000000..ef0ac9e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml
@@ -0,0 +1,22 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: remove docker containers
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - ansible-test-docker-inventory-container-1
+ - ansible-test-docker-inventory-container-2
+
+ - name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml
new file mode 100644
index 00000000..89ebdb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml
@@ -0,0 +1,22 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: yes
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
+
+ - name: Start containers
+ docker_container:
+ name: "{{ item.name }}"
+ image: "{{ docker_test_image_alpine }}"
+ state: started
+ command: '/bin/sh -c "sleep 10m"'
+ published_ports:
+ - 22/tcp
+ loop:
+ - name: ansible-test-docker-inventory-container-1
+ - name: ansible-test-docker-inventory-container-2
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..aa18e19c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml
@@ -0,0 +1,36 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure that the default groups are there, but no others
+ assert:
+ that:
+ - groups.all | length >= 2
+ - groups.ungrouped | length >= 2
+ - 
groups | length == 2 + +- hosts: all + gather_facts: false + tasks: + - when: + # When the integration tests are run inside a docker container, there + # will be other containers. + - inventory_hostname.startswith('ansible-test-docker-inventory-container-') + block: + + - name: Run raw command + raw: ls / + register: output + + - name: Check whether we have some directories we expect in the output + assert: + that: + - "'bin' in output.stdout_lines" + - "'dev' in output.stdout_lines" + - "'lib' in output.stdout_lines" + - "'proc' in output.stdout_lines" + - "'sys' in output.stdout_lines" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml new file mode 100644 index 00000000..c17d2840 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml @@ -0,0 +1,45 @@ +--- +- hosts: 127.0.0.1 + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + gather_facts: no + tasks: + - name: Show all groups + debug: + var: groups + - name: Load variables + include_vars: ../../setup_docker/vars/main.yml + - name: Make sure that the expected groups are there + assert: + that: + - groups.all | length >= 2 + - groups.ungrouped | length >= 0 + - groups.running | length >= 2 + - groups.stopped | length >= 0 + - groups['image_' ~ docker_test_image_alpine] | length == 2 + - groups['ansible-test-docker-inventory-container-1'] | length == 1 + - groups['ansible-test-docker-inventory-container-2'] | length == 1 + - groups['unix://var/run/docker.sock'] | length >= 2 + - groups | length >= 12 + # The four additional groups are IDs and short IDs of the containers. + # When the integration tests are run inside a docker container, there + # will be more groups (for the additional container(s)). + +- hosts: all + # We don't really want to connect to the nodes, since we have no SSH daemon running on them + connection: local + vars: + ansible_python_interpreter: "{{ ansible_playbook_python }}" + gather_facts: no + tasks: + - name: Show all variables + debug: + var: hostvars[inventory_hostname] + - name: Make sure SSH is set up + assert: + that: + - ansible_ssh_host == '1.2.3.4' + - ansible_ssh_port == docker_networksettings.Ports['22/tcp'][0].HostPort + when: + # When the integration tests are run inside a docker container, there + # will be other containers. + - inventory_hostname.startswith('ansible-test-docker-inventory-container-') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh new file mode 100755 index 00000000..0ea425b8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +cleanup() { + echo "Cleanup" + ansible-playbook playbooks/docker_cleanup.yml + echo "Done" +} + +trap cleanup INT TERM EXIT + +echo "Setup" +ANSIBLE_ROLES_PATH=.. 
ansible-playbook playbooks/docker_setup.yml
+
+echo "Test docker_containers inventory 1"
+ansible-playbook -i inventory_1.docker.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_containers inventory 2"
+ansible-playbook -i inventory_2.docker.yml playbooks/test_inventory_2.yml
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases
new file mode 100644
index 00000000..1a7a54b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases
@@ -0,0 +1,4 @@
+disabled
+shippable/posix/group2
+destructive
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine
new file mode 100644
index 00000000..be5d00c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Mock Docker Machine wrapper for testing purposes
+
+[ "$MOCK_ERROR_IN" == "$1" ] && echo >&2 "Mock Docker Machine error" && exit 1
+case $1 in
+ env)
+ cat <<'EOF'
+export DOCKER_TLS_VERIFY="1"
+export DOCKER_HOST="tcp://134.209.204.160:2376"
+export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+export DOCKER_MACHINE_NAME="routinator"
+# Run this command to configure your shell:
+# eval $(docker-machine env --shell=bash routinator)
+EOF
+ ;;
+
+ *)
+ /usr/bin/docker-machine "$@"
+ ;;
+esac
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml
new file mode 100644
index 00000000..caf34787
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml
@@ -0,0 +1 @@
+plugin: community.docker.docker_machine
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml
new file mode 100644
index 00000000..00efaec9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml
@@ -0,0 +1,2 @@
+plugin: community.docker.docker_machine
+daemon_env: require
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml
new file mode 100644
index 00000000..0fe42019
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml
@@ -0,0 +1,2 @@
+plugin: 
community.docker.docker_machine +daemon_env: optional diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml new file mode 100644 index 00000000..07da8c6d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_docker diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml new file mode 100644 index 00000000..9f526fb4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml @@ -0,0 +1,18 @@ +--- +- hosts: 127.0.0.1 + connection: local + tasks: + - name: Setup docker + include_role: + name: setup_docker + + # There seems to be no better way to install docker-machine. At least I couldn't find any packages for RHEL7/8. + - name: Download docker-machine binary + vars: + docker_machine_version: "0.16.1" + get_url: + url: "https://github.com/docker/machine/releases/download/v{{ docker_machine_version }}/docker-machine-{{ ansible_system }}-{{ ansible_userspace_architecture }}" + dest: /tmp/docker-machine + - name: Install docker-machine binary + command: install /tmp/docker-machine /usr/bin/docker-machine + become: yes diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml new file mode 100644 index 00000000..78042b62 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml @@ -0,0 +1,11 @@ +--- +- hosts: 127.0.0.1 + connection: local + tasks: + - name: Request Docker Machine to use this machine as a generic VM + command: "docker-machine --debug create \ + --driver generic \ + --generic-ip-address=localhost \ + --generic-ssh-key {{ lookup('env', 'HOME') }}/.ssh/id_rsa \ + --generic-ssh-user root \ + vm" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml new file mode 100644 index 00000000..b272c094 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml @@ -0,0 +1,6 @@ +--- +- hosts: 127.0.0.1 + connection: local + tasks: + - name: Request Docker Machine to remove this machine as a generic VM + command: "docker-machine rm vm -f" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml new file mode 100644 index 
00000000..d75c69b2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml @@ -0,0 +1,50 @@ +- hosts: 127.0.0.1 + gather_facts: no + tasks: + - name: sanity check Docker Machine output + vars: + dm_ls_format: !unsafe '{{.Name}} | {{.DriverName}} | {{.State}} | {{.URL}} | {{.Error}}' + success_regex: "^vm | [^|]+ | Running | tcp://.+ |$" + command: docker-machine ls --format '{{ dm_ls_format }}' + register: result + failed_when: result.rc != 0 or result.stdout is not match(success_regex) + + - name: verify Docker Machine ip + command: docker-machine ip vm + register: result + failed_when: result.rc != 0 or result.stdout != hostvars['vm'].ansible_host + + - name: verify Docker Machine env + command: docker-machine env --shell=sh vm + register: result + + - debug: var=result.stdout + + - assert: + that: + - "'DOCKER_TLS_VERIFY=\"{{ hostvars['vm'].dm_DOCKER_TLS_VERIFY }}\"' in result.stdout" + - "'DOCKER_HOST=\"{{ hostvars['vm'].dm_DOCKER_HOST }}\"' in result.stdout" + - "'DOCKER_CERT_PATH=\"{{ hostvars['vm'].dm_DOCKER_CERT_PATH }}\"' in result.stdout" + - "'DOCKER_MACHINE_NAME=\"{{ hostvars['vm'].dm_DOCKER_MACHINE_NAME }}\"' in result.stdout" + +- hosts: vm + gather_facts: no + tasks: + - name: do something to verify that accept-new ssh setting was applied by the docker-machine inventory plugin + raw: uname -a + register: result + + - debug: var=result.stdout + +- hosts: 127.0.0.1 + gather_facts: no + environment: + DOCKER_CERT_PATH: "{{ hostvars['vm'].dm_DOCKER_CERT_PATH }}" + DOCKER_HOST: "{{ hostvars['vm'].dm_DOCKER_HOST }}" + DOCKER_MACHINE_NAME: "{{ hostvars['vm'].dm_DOCKER_MACHINE_NAME }}" + DOCKER_TLS_VERIFY: "{{ hostvars['vm'].dm_DOCKER_TLS_VERIFY }}" + tasks: + - name: run a Docker container on the target Docker Machine host to verify that Docker daemon connection settings from the docker-machine inventory plugin work as expected + docker_container: + name: test + image: "{{ docker_test_image_hello_world }}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh new file mode 100755 index 00000000..074e64fc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +SCRIPT_DIR=$(dirname "$0") + +echo "Who am I: $(whoami)" +echo "Home: ${HOME}" +echo "PWD: $(pwd)" +echo "Script dir: ${SCRIPT_DIR}" + +# restrict Ansible just to our inventory plugin, to prevent inventory data being matched by the test but being provided +# by some other dynamic inventory provider +export ANSIBLE_INVENTORY_ENABLED=docker_machine + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +SAVED_PATH="$PATH" + +cleanup() { + PATH="${SAVED_PATH}" + echo "Cleanup" + ansible-playbook -i teardown.docker_machine.yml playbooks/teardown.yml + echo "Done" +} + +trap cleanup INT TERM EXIT + +echo "Pre-setup (install docker, docker-machine)" +ANSIBLE_ROLES_PATH=.. 
ansible-playbook playbooks/pre-setup.yml
+
+echo "Print docker-machine version"
+docker-machine --version
+
+echo "Check preconditions"
+# Host should NOT be known to Ansible before the test starts
+ansible-inventory -i inventory_1.docker_machine.yml --host vm >/dev/null && exit 1
+
+echo "Test that the docker_machine inventory plugin is being loaded"
+ANSIBLE_DEBUG=yes ansible-inventory -i inventory_1.docker_machine.yml --list | grep -F "Loading InventoryModule 'docker_machine'"
+
+echo "Setup"
+ansible-playbook playbooks/setup.yml
+
+echo "Test docker_machine inventory 1"
+ansible-playbook -i inventory_1.docker_machine.yml playbooks/test_inventory_1.yml
+
+echo "Activate Docker Machine mock"
+PATH=${SCRIPT_DIR}:$PATH
+
+echo "Test docker_machine inventory 2: daemon_env=require daemon env success=yes"
+ansible-inventory -i inventory_2.docker_machine.yml --list
+
+echo "Test docker_machine inventory 2: daemon_env=require daemon env success=no"
+export MOCK_ERROR_IN=env
+ansible-inventory -i inventory_2.docker_machine.yml --list
+unset MOCK_ERROR_IN
+
+echo "Test docker_machine inventory 3: daemon_env=optional daemon env success=yes"
+ansible-inventory -i inventory_3.docker_machine.yml --list
+
+echo "Test docker_machine inventory 3: daemon_env=optional daemon env success=no"
+export MOCK_ERROR_IN=env
+ansible-inventory -i inventory_3.docker_machine.yml --list
+unset MOCK_ERROR_IN
+
+echo "Deactivate Docker Machine mock"
+PATH="${SAVED_PATH}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml
new file mode 100644
index 00000000..046f4782
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml
@@ -0,0 +1,3 @@
+plugin: community.docker.docker_machine
+daemon_env: skip
+running_required: false
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases
new file mode 100644
index 00000000..f225f586
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+destructive
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml
new file mode 100644
index 00000000..03e11820
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml
@@ -0,0 +1,2 @@
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml
new file mode 100644
index 00000000..03d3c554
--- /dev/null
+++ 
b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml
@@ -0,0 +1,4 @@
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
+verbose_output: false
+include_host_uri: true
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml
new file mode 100644
index 00000000..9cf87159
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: yes
+
+ - name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml
new file mode 100644
index 00000000..6fe0f874
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml
@@ -0,0 +1,15 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: yes
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
+
+ - name: Create a Swarm cluster
+ community.docker.docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..600a89b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml
@@ -0,0 +1,58 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, 
Ansible can't find the Python interpreter when connecting to the nodes, + # which is in fact just localhost in disguise. That's why we use ansible_playbook_python. + ansible_python_interpreter: "{{ ansible_playbook_python }}" + tasks: + - name: Check for groups + assert: + that: + - "groups.manager | length > 0" + - "groups.worker | length >= 0" + - "groups.leader | length == 1" + run_once: yes + + - name: List manager group + debug: + var: groups.manager + run_once: yes + + - name: List worker group + debug: + var: groups.worker + run_once: yes + + - name: List leader group + debug: + var: groups.leader + run_once: yes + + - name: Print ansible_host per host + debug: + var: ansible_host + + - name: Make sure docker_swarm_node_attributes is available + assert: + that: + - docker_swarm_node_attributes is not undefined + - name: Print docker_swarm_node_attributes per host + debug: + var: docker_swarm_node_attributes diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml new file mode 100644 index 00000000..b2a794d3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml @@ -0,0 +1,35 @@ +--- +- hosts: 127.0.0.1 + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + gather_facts: no + tasks: + - name: Show all groups + debug: + var: groups + - name: Make sure docker_swarm groups are there + assert: + that: + - groups.all | length > 0 + - groups.leader | length == 1 + - groups.manager | length > 0 + - groups.worker | length >= 0 + - groups.nonleaders | length >= 0 + +- hosts: all + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + vars: + # for some reason, Ansible can't find the Python interpreter when connecting to the nodes, + # which is in fact just localhost in disguise. That's why we use ansible_playbook_python. + ansible_python_interpreter: "{{ ansible_playbook_python }}" + tasks: + - name: Make sure docker_swarm_node_attributes is not available + assert: + that: + - docker_swarm_node_attributes is undefined + - name: Make sure ansible_host_uri is available + assert: + that: + - ansible_host_uri is defined + - name: Print ansible_host_uri + debug: + var: ansible_host_uri diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh new file mode 100755 index 00000000..b93d386a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +cleanup() { + echo "Cleanup" + ansible-playbook playbooks/swarm_cleanup.yml + echo "Done" +} + +trap cleanup INT TERM EXIT + +echo "Setup" +ANSIBLE_ROLES_PATH=.. 
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/swarm_setup.yml
+
+echo "Test docker_swarm inventory 1"
+ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_swarm inventory 2"
+ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases
new file mode 100644
index 00000000..65e83152
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases
@@ -0,0 +1 @@
+needs/target/setup_epel
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml
new file mode 100644
index 00000000..661a309e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml
@@ -0,0 +1,19 @@
+---
+docker_cli_version: '0.0'
+docker_api_version: '0.0'
+docker_py_version: '0.0'
+docker_skip_cleanup: yes
+docker_prereq_packages: []
+docker_packages:
+  - docker-ce
+docker_cli_packages:
+  - docker-ce-cli
+
+docker_pip_extra_packages: []
+docker_pip_package: docker
+docker_pip_package_limit: ''
+
+docker_cleanup_packages:
+  - docker
+  - docker-ce
+  - docker-ce-cli
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml
new file mode 100644
index 00000000..a389f91d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml
@@ -0,0 +1,14 @@
+- name: Remove pip packages
+  pip:
+    state: absent
+    name: "{{ [docker_pip_package] | union(docker_pip_extra_packages) }}"
+  listen: cleanup docker
+  when: not docker_skip_cleanup | bool
+
+- name: Remove docker packages
+  action: "{{ ansible_facts.pkg_mgr }}"
+  args:
+    name: "{{ docker_cleanup_packages }}"
+    state: absent
+  listen: cleanup docker
+  when: not docker_skip_cleanup | bool
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml
new file mode 100644
index 00000000..2be15776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+  - setup_remote_constraints
+  - setup_pkg_mgr
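Both handlers above subscribe to the same `listen: cleanup docker` topic, so a single notification fires the pip cleanup and the package cleanup together. A minimal sketch of a task that would trigger them (the task body is hypothetical; the notify string is the topic the handlers listen on):

    - name: Install a docker-related package (hypothetical example task)
      pip:
        name: "{{ docker_pip_package }}"
        state: present
      notify: cleanup docker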
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml
new file mode 100644
index 00000000..84a1428b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml
@@ -0,0 +1,45 @@
+---
+- name: Get OS version
+  shell: uname -r
+  register: os_version
+
+- name: Install pre-reqs
+  apt:
+    name: '{{ docker_prereq_packages }}'
+    state: present
+    update_cache: true
+  notify: cleanup docker
+
+- name: Add gpg key
+  shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key
+
+- name: Add Docker repo
+  shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+
+- block:
+    - name: Prevent service restart
+      copy:
+        content: exit 101
+        dest: /usr/sbin/policy-rc.d
+        backup: true
+        mode: '0755'
+      register: policy_rc_d
+
+    - name: Install Docker CE
+      apt:
+        name: '{{ docker_packages if needs_docker_daemon else docker_cli_packages }}'
+        state: present
+        update_cache: true
+
+  always:
+    - name: Restore /usr/sbin/policy-rc.d (if needed)
+      command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d
+      when:
+        - '''backup_file'' in policy_rc_d'
+
+    - name: Remove /usr/sbin/policy-rc.d (if needed)
+      file:
+        path: /usr/sbin/policy-rc.d
+        state: absent
+      when:
+        - '''backup_file'' not in policy_rc_d'
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml
new file mode 100644
index 00000000..f50cc2c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml
@@ -0,0 +1,26 @@
+---
+- name: Import GPG key
+  rpm_key:
+    key: https://download.docker.com/linux/fedora/gpg
+    state: present
+
+- name: Add repository
+  yum_repository:
+    file: docker-ce
+    name: docker-ce-stable
+    description: Docker CE Stable - $basearch
+    baseurl: https://download.docker.com/linux/fedora/{{ 31 if ansible_facts.distribution_major_version|int > 31 else '$releasever' }}/$basearch/stable
+    enabled: yes
+    gpgcheck: yes
+
+- name: Update cache
+  command: dnf makecache
+  args:
+    warn: no
+
+- name: Install docker
+  dnf:
+    name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+    state: present
+    enablerepo: docker-ce-test
+  notify: cleanup docker
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml
new file mode 100644
index 00000000..abcc2b8a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml
@@ -0,0 +1,46 @@
+---
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+  yum:
+    name: "{{ docker_prereq_packages }}"
+    state: present
+  notify: cleanup docker
+
+- name: Install epel repo which is missing on rhel-7 and is needed for pigz (needed for docker-ce 18)
+  include_role:
+    name: setup_epel
+
+- name: Enable extras repository for RHEL on AWS
+  # RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ use rhel-7-server-rhui-extras-rpms
+  command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms
+  args:
+    warn: no
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+  yum_repository:
+    name: docker-ce
+    description: docker-ce
+    baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+    gpgcheck: true
+    gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Update cache
+  command: yum -y makecache fast
+  args:
+    warn: no
+
+- name: Install docker
+  yum:
+    name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+    state: present
+  notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+  service:
+    name: docker
+    state: started
+  ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
+  when: needs_docker_daemon
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml
new file mode 100644
index 00000000..a7c7d586
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml
@@ -0,0 +1,35 @@
+---
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+  dnf:
+    name: "{{ docker_prereq_packages }}"
+    state: present
+  notify: cleanup docker
+  register: result
+  until: result is success
+  retries: 10
+  delay: 2
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+  yum_repository:
+    name: docker-ce
+    description: docker-ce
+    baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+    gpgcheck: true
+    gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Install docker
+  dnf:
+    name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+    state: present
+  notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+  service:
+    name: docker
+    state: started
+  ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
+  when: needs_docker_daemon
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml
new file mode 100644
index 00000000..52cdc1ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml
@@ -0,0 +1,8 @@
+---
+- name: Install docker 17
+  community.general.zypper:
+    name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+    force: yes
+    disable_gpg_check: yes
+    update_cache: yes
+  notify: cleanup docker
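The RedHat-8 pre-req task above wraps dnf in an until/retries loop to ride out transient repository failures. The same pattern fits any flaky step; a minimal generic sketch (the command is a placeholder):

    - name: Retry a flaky step until it succeeds (placeholder command)
      command: /bin/true
      register: result
      until: result is success
      retries: 10
      delay: 2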
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml
new file mode 100644
index 00000000..ed54a6da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml
@@ -0,0 +1,154 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Setup Docker
+  when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+  block:
+    - name: Detect whether we are running inside a container
+      current_container_facts:
+
+    - name: Determine whether Docker Daemon needs to be installed
+      set_fact:
+        needs_docker_daemon: '{{ not ansible_module_running_in_container }}'
+
+    - name: Include distribution specific variables
+      include_vars: "{{ lookup('first_found', params) }}"
+      vars:
+        params:
+          files:
+            - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+            - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+            - "{{ ansible_facts.distribution }}.yml"
+            - "{{ ansible_facts.os_family }}.yml"
+            - default.yml
+          paths:
+            - "{{ role_path }}/vars"
+
+    - name: Include distribution specific tasks
+      include_tasks: "{{ lookup('first_found', params) }}"
+      vars:
+        params:
+          files:
+            - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+            - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+            - "{{ ansible_facts.distribution }}.yml"
+            - "{{ ansible_facts.os_family }}.yml"
+          paths:
+            - "{{ role_path }}/tasks"
+
+    # Detect docker API version
+    - name: Check Docker API version
+      command: "docker version -f {% raw %}'{{(index .Server.Components 0).Details.ApiVersion}}'{% endraw %}"
+      register: docker_api_version_stdout
+      ignore_errors: yes
+
+    - name: Limit docker pypi package version to < 4.3.0
+      set_fact:
+        docker_pip_package_limit: '<4.3.0'
+      when: (docker_api_version_stdout.stdout | default('0.0')) is version('1.39', '<')
+
+    - name: Install Python requirements
+      pip:
+        state: present
+        name: "{{ [docker_pip_package ~ docker_pip_package_limit] + docker_pip_extra_packages }}"
+        extra_args: "-c {{ remote_constraints }}"
+      notify: cleanup docker
+
+    # Detect docker CLI, API and docker-py versions
+    - name: Check Docker CLI version
+      command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}"
+      register: docker_cli_version_stdout
+      ignore_errors: yes
+
+    - name: Check Docker API version
+      command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'"
+      register: docker_api_version_stdout
+      ignore_errors: yes
+
+    - name: Check docker-py API version
+      command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'"
+      register: docker_py_version_stdout
+      ignore_errors: yes
+
+    - set_fact:
+        docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}"
+        docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}"
+        docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}"
+
+    - debug:
+        msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}"
+
+    - block:
+        # Cleanup docker daemon
+        - name: "Remove all ansible-test-* docker containers"
+          shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f'
+          register: docker_containers
+          retries: 3
+          delay: 3
+          until: docker_containers is success
+          ignore_errors: yes
+
+        - name: "Remove all ansible-test-* docker volumes"
+          shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f'
+          register: docker_volumes
+          ignore_errors: yes
+
+        - name: "Remove all ansible-test-* docker networks"
+          shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm'
+          register: docker_networks
+          ignore_errors: yes
+
+        - name: Cleaned docker resources
+          debug:
+            var: docker_resources
+          vars:
+            docker_resources:
+              containers: "{{ docker_containers.stdout_lines | default([]) }}"
+              volumes: "{{ docker_volumes.stdout_lines | default([]) }}"
+              networks: "{{ docker_networks.stdout_lines | default([]) }}"
+
+        # List all existing docker resources
+        - name: List all docker containers
+          command: docker ps --no-trunc -a
+          register: docker_containers
+          ignore_errors: yes
+
+        - name: List all docker volumes
+          command: docker volume ls
+          register: docker_volumes
+          ignore_errors: yes
+
+        - name: List all docker networks
+          command: docker network ls --no-trunc
+          register: docker_networks
+          ignore_errors: yes
+
+        - name: List all docker images
+          command: docker images --no-trunc -a
+          register: docker_images
+          ignore_errors: yes
+
+        - name: Still existing docker resources
+          debug:
+            var: docker_resources
+          vars:
+            docker_resources:
+              containers: "{{ docker_containers.stdout_lines | default([]) }}"
+              volumes: "{{ docker_volumes.stdout_lines | default([]) }}"
+              networks: "{{ docker_networks.stdout_lines | default([]) }}"
+              images: "{{ docker_images.stdout_lines | default([]) }}"
+
+      when: docker_cli_version is version('0.0', '>')
+
+    - name: Inspect current container
+      docker_container_info:
+        name: "{{ ansible_module_container_id }}"
+      register: current_container_info
+      when: ansible_module_running_in_container
+
+    - name: Determine network name
+      set_fact:
+        current_container_network_ip: "{{ (current_container_info.container.NetworkSettings.Networks | dictsort)[0].0 | default('') if ansible_module_running_in_container else '' }}"
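In the task file above, docker_pip_package and docker_pip_package_limit are concatenated with the Jinja `~` operator before being handed to pip. Assuming the preceding set_fact pinned the limit to '<4.3.0' and no extra packages are configured, the install task resolves to the equivalent of this sketch (the constraints path is hypothetical):

    - name: Install Python requirements (resolved form, sketch only)
      pip:
        state: present
        name:
          - 'docker<4.3.0'
        extra_args: "-c /tmp/constraints.txt"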
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml
new file mode 100644
index 00000000..66f3ac9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+docker_prereq_packages:
+  - apt-transport-https
+  - ca-certificates
+  - curl
+  - software-properties-common
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml
new file mode 100644
index 00000000..100a878d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml
@@ -0,0 +1,9 @@
+---
+docker_prereq_packages:
+  - yum-utils
+  - device-mapper-persistent-data
+  - lvm2
+  - libseccomp
+
+docker_pip_extra_packages:
+  - requests==2.6.0
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml
new file mode 100644
index 00000000..99af35f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml
@@ -0,0 +1,12 @@
+---
+docker_prereq_packages:
+  - yum-utils
+  - device-mapper-persistent-data
+  - lvm2
+  - libseccomp
+
+docker_packages:
+  - docker-ce-19.03.13
+  - docker-ce-cli-19.03.13
+docker_cli_packages:
+  - docker-ce-cli-19.03.13
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml
new file mode 100644
index 00000000..1974462a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml
@@ -0,0 +1,8 @@
+---
+docker_packages:
+  - docker>=17
+
+# OpenSUSE 15 does not seem to have docker-client (https://software.opensuse.org/package/docker-client)
+# or any other Docker CLI-only package
+docker_cli_packages:
+  - docker>=17
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..84130934
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml
@@ -0,0 +1,6 @@
+---
+docker_pip_extra_packages:
+  # Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version
+  # to ensure our get_url tests work out fine. This is only an issue if pyOpenSSL is also installed.
+  # Not sure why RHEL7 needs this specific version
+  - requests==2.6.0
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env
new file mode 100644
index 00000000..0fd37705
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env
@@ -0,0 +1,2 @@
+# Docker images for runme.sh based tests
+DOCKER_TEST_IMAGE_PYTHON3=python:3-alpine
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml
new file mode 100644
index 00000000..8d4b74c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml
@@ -0,0 +1,11 @@
+---
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
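The vars/main.yml above centralizes the image references the test targets use. A consuming task does not appear in this part of the patch; as a minimal sketch, pulling one of these images would look roughly like:

    - name: Pull the hello-world test image (sketch, not part of this patch)
      community.docker.docker_image:
        name: "{{ docker_test_image_hello_world }}"
        source: pull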
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases
new file mode 100644
index 00000000..688c8884
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases
@@ -0,0 +1,2 @@
+needs/target/setup_docker
+needs/target/setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf
new file mode 100644
index 00000000..bfba5204
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf
@@ -0,0 +1,46 @@
+events {
+  worker_connections 16;
+}
+
+http {
+  include /etc/nginx/mime.types;
+  default_type application/octet-stream;
+
+  error_log /dev/stdout info;
+  access_log /dev/stdout;
+
+  server {
+    listen *:5000 ssl;
+    server_name test-registry.ansible.com;
+    server_name_in_redirect on;
+
+    ssl_protocols TLSv1.2;
+    ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256';
+    ssl_ecdh_curve X25519:secp521r1:secp384r1;
+    ssl_prefer_server_ciphers on;
+    ssl_certificate /etc/nginx/cert.pem;
+    ssl_certificate_key /etc/nginx/cert.key;
+
+    location / {
+      return 401;
+    }
+
+    location /v2/ {
+      proxy_pass http://real-registry:5000;
+      proxy_http_version 1.1;
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-Proto $scheme;
+      proxy_set_header X-Forwarded-For $remote_addr;
+      proxy_set_header X-Forwarded-Port $server_port;
+      proxy_set_header X-Request-Start $msec;
+
+      client_max_body_size 0;
+      chunked_transfer_encoding on;
+
+      auth_basic "Ansible Test Docker Registry";
+      auth_basic_user_file /etc/nginx/nginx.htpasswd;
+    }
+  }
+}
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd
new file mode 100644
index 00000000..1291ae77
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd
@@ -0,0 +1 @@
+testuser:{PLAIN}hunter2
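nginx.htpasswd above provisions a single basic-auth user (testuser/hunter2) for the TLS frontend defined in nginx.conf. A test that authenticates against the frontend would look roughly like this sketch (registry_frontend_address is set later in setup.yml; validate_certs is off because the certificate is self-signed):

    - name: Log in to the test registry frontend (sketch)
      community.docker.docker_login:
        registry_url: "{{ registry_frontend_address }}"
        username: testuser
        password: hunter2
        validate_certs: false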
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml
new file mode 100644
index 00000000..f9d2c9e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml
@@ -0,0 +1,55 @@
+---
+- name: "Make sure all images are removed"
+  docker_image:
+    name: "{{ item }}"
+    state: absent
+  with_items: "{{ docker_registry_setup_inames }}"
+
+- name: "Get registry logs"
+  command: "docker logs {{ docker_registry_container_name_registry }}"
+  register: registry_logs
+  no_log: yes
+  ignore_errors: yes
+
+- name: "Printing registry logs"
+  debug:
+    var: registry_logs.stdout_lines
+  when: registry_logs is not failed
+
+- name: "Get nginx logs for first instance"
+  command: "docker logs {{ docker_registry_container_name_nginx }}"
+  register: nginx_logs
+  no_log: yes
+  ignore_errors: yes
+
+- name: "Get nginx logs for second instance"
+  command: "docker logs {{ docker_registry_container_name_nginx2 }}"
+  register: nginx2_logs
+  no_log: yes
+  ignore_errors: yes
+
+- name: "Printing nginx logs for first instance"
+  debug:
+    var: nginx_logs.stdout_lines
+  when: nginx_logs is not failed
+
+- name: "Printing nginx logs for second instance"
+  debug:
+    var: nginx2_logs.stdout_lines
+  when: nginx2_logs is not failed
+
+- name: "Make sure all containers are removed"
+  docker_container:
+    name: "{{ item }}"
+    state: absent
+    force_kill: yes
+  with_items: "{{ docker_registry_setup_cnames }}"
+  register: result
+  retries: 3
+  delay: 3
+  until: result is success
+
+- name: "Make sure all volumes are removed"
+  command: "docker volume rm -f {{ item }}"
+  with_items: "{{ docker_registry_setup_vnames }}"
+  ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml
new file mode 100644
index 00000000..23030182
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Remove test registry
+  include_tasks: ../handlers/cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml
new file mode 100644
index 00000000..b252b0e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+  #- setup_docker -- done in setup.yml, to work around cleanup problems!
+  - setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml
new file mode 100644
index 00000000..320df246
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+  include_tasks:
+    file: setup.yml
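The `when:` gate above joins distribution and major version with the Jinja `~` concatenation operator, so CentOS 6 evaluates to the string 'CentOS6'. A one-task sketch that shows the evaluated value:

    - name: Show the value the distribution gate compares (sketch)
      debug:
        msg: "{{ ansible_facts.distribution ~ ansible_facts.distribution_major_version }}"  # e.g. 'CentOS6'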
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml
new file mode 100644
index 00000000..f1055ea3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml
@@ -0,0 +1,97 @@
+---
+# Set up first nginx frontend for registry
+- name: Create volume for nginx frontend for registry
+  docker_volume:
+    name: '{{ docker_registry_container_name_frontend }}'
+    state: present
+
+- name: Create container for nginx frontend for registry
+  docker_container:
+    state: stopped
+    name: '{{ docker_registry_container_name_frontend }}'
+    image: "{{ docker_test_image_registry_nginx }}"
+    ports: 5000
+    # `links` does not work when using a network. That's why the docker_container task
+    # in setup.yml specifies `aliases` so we get the same effect.
+    links:
+      - '{{ docker_registry_container_name_registry }}:real-registry'
+    volumes:
+      - '{{ docker_registry_container_name_frontend }}:/etc/nginx/'
+    network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+    networks: >-
+      {{
+        [dict([['name', current_container_network_ip]])]
+        if current_container_network_ip not in ['', 'bridge'] else omit
+      }}
+  register: nginx_container
+
+- name: Copy static files into volume
+  command: docker cp {{ role_path }}/files/{{ item }} {{ docker_registry_container_name_frontend }}:/etc/nginx/{{ item }}
+  loop:
+    - nginx.conf
+    - nginx.htpasswd
+  register: can_copy_files
+  ignore_errors: yes
+
+- when: can_copy_files is not failed
+  block:
+
+    - name: Create private key for frontend certificate
+      community.crypto.openssl_privatekey:
+        path: '{{ output_dir }}/cert.key'
+        type: ECC
+        curve: secp256r1
+        force: yes
+
+    - name: Create CSR for frontend certificate
+      community.crypto.openssl_csr:
+        path: '{{ output_dir }}/cert.csr'
+        privatekey_path: '{{ output_dir }}/cert.key'
+        subject_alt_name:
+          - DNS:test-registry.ansible.com
+
+    - name: Create frontend certificate
+      community.crypto.openssl_certificate:
+        path: '{{ output_dir }}/cert.pem'
+        csr_path: '{{ output_dir }}/cert.csr'
+        privatekey_path: '{{ output_dir }}/cert.key'
+        provider: selfsigned
+
+    - name: Copy dynamic files into volume
+      command: docker cp {{ output_dir }}/{{ item }} {{ docker_registry_container_name_frontend }}:/etc/nginx/{{ item }}
+      loop:
+        - cert.pem
+        - cert.key
+
+    - name: Start nginx frontend for registry
+      docker_container:
+        name: '{{ docker_registry_container_name_frontend }}'
+        state: started
+      register: nginx_container
+
+    - name: Output nginx container network settings
+      debug:
+        var: nginx_container.container.NetworkSettings
+
+    - name: Get registry URL
+      set_fact:
+        # Note that this host/port combination is used by the Docker daemon, that's why `localhost` is appropriate!
+        # This host/port combination cannot be used if the tests are running inside a docker container.
+        docker_registry_frontend_address: localhost:{{ nginx_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
+        # The following host/port combination can be used from inside the docker container.
+        docker_registry_frontend_address_internal: "{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:5000"
+
+    - name: Wait for registry frontend
+      uri:
+        url: https://{{ docker_registry_frontend_address_internal }}/v2/
+        url_username: testuser
+        url_password: hunter2
+        validate_certs: false
+      register: result
+      until: result is success
+      retries: 5
+      delay: 1
+
+- set_fact:
+    docker_registry_frontend_address: 'n/a'
+  when: can_copy_files is failed
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml
new file mode 100644
index 00000000..6782da9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml
@@ -0,0 +1,80 @@
+---
+- name: Register registry cleanup
+  # This must be done **before** docker is set up (see next task), to ensure that the
+  # registry is removed **before** docker itself is removed. This is necessary as the
+  # registry and its frontends run as docker containers.
+  command: 'true'
+  notify: Remove test registry
+
+- name: Setup Docker
+  # Please note that we do setup_docker here and not via meta/main.yml to avoid the problem that
+  # our cleanup is called **after** setup_docker's cleanup has been called!
+  include_role:
+    name: setup_docker
+
+- name: Create random name prefix and test registry name
+  set_fact:
+    docker_registry_container_name_registry: '{{ ''ansible-test-registry-%0x'' % ((2**32) | random) }}'
+    docker_registry_container_name_nginx: '{{ ''ansible-test-registry-frontend-%0x'' % ((2**32) | random) }}'
+    docker_registry_container_name_nginx2: '{{ ''ansible-test-registry-frontend2-%0x'' % ((2**32) | random) }}'
+
+- name: Create image and container list
+  set_fact:
+    docker_registry_setup_inames: []
+    docker_registry_setup_cnames:
+      - '{{ docker_registry_container_name_registry }}'
+      - '{{ docker_registry_container_name_nginx }}'
+      - '{{ docker_registry_container_name_nginx2 }}'
+    docker_registry_setup_vnames:
+      - '{{ docker_registry_container_name_nginx }}'
+      - '{{ docker_registry_container_name_nginx2 }}'
+
+- debug:
+    msg: Using test registry name {{ docker_registry_container_name_registry }} and nginx frontend names {{ docker_registry_container_name_nginx }} and {{ docker_registry_container_name_nginx2 }}
+
+- fail: msg="Too old docker / docker-py version to set up docker registry!"
+  when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
+
+- when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+  block:
+
+    # Set up registry container
+    - name: Start test registry
+      docker_container:
+        name: '{{ docker_registry_container_name_registry }}'
+        image: "{{ docker_test_image_registry }}"
+        ports: 5000
+        network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+        # We need to define the alias `real-registry` here because the global `links`
+        # option for the NGINX containers (see setup-frontend.yml) does not work when
+        # using networks.
+        networks: >-
+          {{
+            [dict([['name', current_container_network_ip], ['aliases', ['real-registry']]])]
+            if current_container_network_ip not in ['', 'bridge'] else omit
+          }}
+      register: registry_container
+
+    - name: Get registry URL
+      set_fact:
+        registry_address: localhost:{{ registry_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
+
+    # Set up first nginx frontend for registry
+    - include_tasks: setup-frontend.yml
+      vars:
+        docker_registry_container_name_frontend: '{{ docker_registry_container_name_nginx }}'
+
+    - set_fact:
+        registry_frontend_address: '{{ docker_registry_frontend_address }}'
+
+    # Set up second nginx frontend for registry
+    - include_tasks: setup-frontend.yml
+      vars:
+        docker_registry_container_name_frontend: '{{ docker_registry_container_name_nginx2 }}'
+
+    - set_fact:
+        registry_frontend2_address: '{{ docker_registry_frontend_address }}'
+
+    # Print addresses for registry and frontends
+    - debug:
+        msg: "Registry available under {{ registry_address }}, NGINX frontends available under {{ registry_frontend_address }} and {{ registry_frontend2_address }}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml
new file mode 100644
index 00000000..8d4b74c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml
@@ -0,0 +1,11 @@
+---
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml
new file mode 100644
index 00000000..21627cfa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install EPEL
+  yum:
+    name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+    disable_gpg_check: true
+  when: ansible_facts.distribution in ['RedHat', 'CentOS']
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml
new file mode 100644
index 00000000..2be15776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+  - setup_remote_constraints
+  - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml
new file mode 100644
index 00000000..33030ab7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Include OS-specific variables
+  include_vars: '{{ ansible_os_family }}.yml'
+  when: not ansible_os_family == "Darwin"
+
+- name: Install cryptography (Python 3)
+  become: true
+  package:
+    name: '{{ cryptography_package_name_python3 }}'
+  when: ansible_os_family != 'Darwin' and ansible_python_version is version('3.0', '>=')
+
+- name: Install cryptography (Python 2)
+  become: true
+  package:
+    name: '{{ cryptography_package_name }}'
+  when: ansible_os_family != 'Darwin' and ansible_python_version is version('3.0', '<')
+
+- name: Install cryptography (Darwin, and potentially upgrade for other OSes)
+  become: true
+  pip:
+    name: cryptography>=1.3.0
+    extra_args: "-c {{ remote_constraints }}"
+
+- name: Register cryptography version
+  command: "{{ ansible_python.executable }} -c 'import cryptography; print(cryptography.__version__)'"
+  register: cryptography_version
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml
new file mode 100644
index 00000000..5ddc85e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml
@@ -0,0 +1,3 @@
+---
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
new file mode 100644
index 00000000..cd7f36a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
@@ -0,0 +1,3 @@
+---
+cryptography_package_name: py27-cryptography
+cryptography_package_name_python3: py36-cryptography
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml
new file mode 100644
index 00000000..5ddc85e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml
@@ -0,0 +1,3 @@
+---
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml
new file mode 100644
index 00000000..5ddc85e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml
@@ -0,0 +1,3 @@
+---
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
new file mode 100644
index 00000000..24d02228
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- set_fact:
+    pkg_mgr: community.general.pkgng
+    ansible_pkg_mgr: community.general.pkgng
+    cacheable: yes
+  when: ansible_os_family == "FreeBSD"
+
+- set_fact:
+    pkg_mgr: community.general.zypper
+    ansible_pkg_mgr: community.general.zypper
+    cacheable: yes
+  when: ansible_os_family == "Suse"
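setup_pkg_mgr above overrides the cached pkg_mgr fact so that generic `action: "{{ ansible_facts.pkg_mgr }}"` tasks, like the setup_docker cleanup handlers earlier in this patch, dispatch to pkgng on FreeBSD and zypper on SUSE. A minimal sketch of such a dispatching task (the package name is hypothetical):

    - name: Remove a package via the platform package manager (sketch)
      action: "{{ ansible_facts.pkg_mgr }}"
      args:
        name: some-package
        state: absent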
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases
new file mode 100644
index 00000000..1ad133ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases
@@ -0,0 +1 @@
+needs/file/tests/utils/constraints.txt
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml
new file mode 100644
index 00000000..d4f8148c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml
@@ -0,0 +1,13 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: record constraints.txt path on remote host
+  set_fact:
+    remote_constraints: "{{ remote_tmp_dir }}/constraints.txt"
+
+- name: copy constraints.txt to remote host
+  copy:
+    src: "{{ role_path }}/../../../utils/constraints.txt"
+    dest: "{{ remote_constraints }}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 00000000..229037c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+  include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+  include_tasks: windows-cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 00000000..39872d74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+  file:
+    path: "{{ remote_tmp_dir }}"
+    state: absent
+  no_log: yes
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 00000000..1e0f51b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,11 @@
+- name: create temporary directory
+  tempfile:
+    state: directory
+    suffix: .test
+  register: remote_tmp_dir
+  notify:
+    - delete temporary directory
+
+- name: record temporary directory
+  set_fact:
+    remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 00000000..93d786f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,15 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+  setup:
+    gather_subset: distribution
+  when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files)}}"
+  vars:
+    files:
+      - "{{ ansible_os_family | lower }}.yml"
+      - "default.yml"
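setup_remote_tmp_dir registers the tempfile result and then overwrites remote_tmp_dir with just its .path attribute, so consumers such as setup_remote_constraints above can treat it as a plain string. A minimal consumer sketch (the file name is hypothetical):

    - name: Drop a scratch file into the shared temporary directory (sketch)
      copy:
        content: example
        dest: "{{ remote_tmp_dir }}/example.txt"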
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/requirements.yml b/collections-debian-merged/ansible_collections/community/docker/tests/requirements.yml
new file mode 100644
index 00000000..a242e324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/requirements.yml
@@ -0,0 +1,7 @@
+integration_tests_dependencies:
+- ansible.posix
+- community.internal_test_tools
+- community.crypto
+- community.general
+unit_tests_dependencies:
+- community.internal_test_tools
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json
new file mode 100644
index 00000000..c789a7fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json
@@ -0,0 +1,7 @@
+{
+    "include_symlinks": true,
+    "prefixes": [
+        "plugins/"
+    ],
+    "output": "path-message"
+}
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py
new file mode 100755
index 00000000..49806f2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Prevent unwanted files from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+    """Main entry point."""
+    paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+    allowed_extensions = (
+        '.cs',
+        '.ps1',
+        '.psm1',
+        '.py',
+    )
+
+    skip_paths = set([
+    ])
+
+    skip_directories = (
+    )
+
+    for path in paths:
+        if path in skip_paths:
+            continue
+
+        if any(path.startswith(skip_directory) for skip_directory in skip_directories):
+            continue
+
+        ext = os.path.splitext(path)[1]
+
+        if ext not in allowed_extensions:
+            print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..e678db7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,3 @@
+plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt
new file mode 100644
index 00000000..e678db7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,3 @@
+plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..e678db7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,3 @@
+plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/builtins.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..f60ee678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+    import __builtin__
+except ImportError:
+    BUILTINS = 'builtins'
+else:
+    BUILTINS = '__builtin__'
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+    # Allow wildcard import because we really do want to import all of mock's
+    # symbols into this compat shim
+    # pylint: disable=wildcard-import,unused-wildcard-import
+    from unittest.mock import *
+except ImportError:
+    # Python 2
+    # pylint: disable=wildcard-import,unused-wildcard-import
+    try:
+        from mock import *
+    except ImportError:
+        print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+    file_spec = None
+
+    def _iterate_read_data(read_data):
+        # Helper for mock_open:
+        # Retrieve lines from read_data via a generator so that separate calls to
+        # readline, read, and readlines are properly interleaved
+        sep = b'\n' if isinstance(read_data, bytes) else '\n'
+        data_as_list = [l + sep for l in read_data.split(sep)]
+
+        if data_as_list[-1] == sep:
+            # If the last line ended in a newline, the list comprehension will have an
+            # extra entry that's just a newline. Remove this.
+            data_as_list = data_as_list[:-1]
+        else:
+            # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't have a newline to end the last line remove the
+            # newline that our naive format() added
+            data_as_list[-1] = data_as_list[-1][:-1]
+
+        for line in data_as_list:
+            yield line
+
+    def mock_open(mock=None, read_data=''):
+        """
+        A helper function to create a mock to replace the use of `open`. It works
+        for `open` called directly or used as a context manager.
+
+        The `mock` argument is the mock object to configure. If `None` (the
+        default) then a `MagicMock` will be created for you, with the API limited
+        to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines` methods
+        of the file handle to return. This is an empty string by default.
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/unittest.py new file mode 100644 index 00000000..98f08ad6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/compat/unittest.py @@ -0,0 +1,38 @@ +# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +# Allow wildcard import because we really do want to import all of +# unittest's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') +else: + from unittest import * diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py new file mode 100644 index 00000000..885f676c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py @@ -0,0 +1,67 @@ +# (c) 2020 Red Hat, Inc.
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from io import StringIO +import pytest + +from ansible_collections.community.docker.tests.unit.compat import mock +from ansible_collections.community.docker.tests.unit.compat import unittest +from ansible.errors import AnsibleError +from ansible.playbook.play_context import PlayContext +from ansible_collections.community.docker.plugins.connection.docker import Connection as DockerConnection + + +class TestDockerConnectionClass(unittest.TestCase): + + def setUp(self): + self.play_context = PlayContext() + self.play_context.prompt = ( + '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' + ) + self.in_stream = StringIO() + + def tearDown(self): + pass + + @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._old_docker_version', + return_value=('false', 'garbage', '', 1)) + @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._new_docker_version', + return_value=('docker version', '1.2.3', '', 0)) + def test_docker_connection_module_too_old(self, mock_new_docker_version, mock_old_docker_version): + self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$', + DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker') + + @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._old_docker_version', + return_value=('false', 'garbage', '', 1)) + @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._new_docker_version', + return_value=('docker version', '1.3.4', '', 0)) + def test_docker_connection_module(self, mock_new_docker_version, mock_old_docker_version): + self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'), + DockerConnection) + + # old version and new version fail + @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._old_docker_version', + return_value=('false', 'garbage', '', 1)) + @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._new_docker_version', + return_value=('false', 'garbage', '', 1)) + def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version): + self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?)
failed: ', + DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker') diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py new file mode 100644 index 00000000..b729d9bd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py @@ -0,0 +1,228 @@ +# Copyright (c), Felix Fontein <felix@fontein.de>, 2020 +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import json +import textwrap + +import pytest + +from mock import MagicMock + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.inventory.data import InventoryData +from ansible.inventory.manager import InventoryManager + +from ansible_collections.community.docker.plugins.inventory.docker_containers import InventoryModule + + +@pytest.fixture(scope="module") +def inventory(): + r = InventoryModule() + r.inventory = InventoryData() + return r + + +LOVING_THARP = { + 'Id': '7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a', + 'Name': '/loving_tharp', + 'Image': 'sha256:349f492ff18add678364a62a67ce9a13487f14293ae0af1baf02398aa432f385', + 'State': { + 'Running': True, + }, + 'Config': { + 'Image': 'quay.io/ansible/ubuntu1804-test-container:1.21.0', + }, +} + + +LOVING_THARP_STACK = { + 'Id': '7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a', + 'Name': '/loving_tharp', + 'Image': 'sha256:349f492ff18add678364a62a67ce9a13487f14293ae0af1baf02398aa432f385', + 'State': { + 'Running': True, + }, + 'Config': { + 'Image': 'quay.io/ansible/ubuntu1804-test-container:1.21.0', + 'Labels': { + 'com.docker.stack.namespace': 'my_stack', + }, + }, + 'NetworkSettings': { + 'Ports': { + '22/tcp': [ + { + 'HostIp': '0.0.0.0', + 'HostPort': '32802' + } + ], + }, + }, +} + + +LOVING_THARP_SERVICE = { + 'Id': '7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a', + 'Name': '/loving_tharp', + 'Image': 'sha256:349f492ff18add678364a62a67ce9a13487f14293ae0af1baf02398aa432f385', + 'State': { + 'Running': True, + }, + 'Config': { + 'Image': 'quay.io/ansible/ubuntu1804-test-container:1.21.0', + 'Labels': { + 'com.docker.swarm.service.name': 'my_service', + }, + }, +} + + +def create_get_option(options, default=False): + def get_option(option): + if option in options: + return options[option] + return default + + return get_option + + +class FakeClient(object): + def __init__(self, *hosts): + self.hosts = dict() + self.list_reply = [] + for host in hosts: + self.list_reply.append({ + 'Id': host['Id'], + 'Names': [host['Name']] if host['Name'] else [], + 'Image': host['Config']['Image'], + 'ImageId': host['Image'], + }) + self.hosts[host['Name']] = host + self.hosts[host['Id']] = host + + def containers(self, all=False): + return list(self.list_reply) + + def inspect_container(self, id): + return self.hosts[id] + + def port(self, container, port): + host = self.hosts[container['Id']] + network_settings = host.get('NetworkSettings') or dict() + ports = network_settings.get('Ports') or dict() + return ports.get('{0}/tcp'.format(port)) or [] + + +def test_populate(inventory, mocker): + client = FakeClient(LOVING_THARP) + + inventory.get_option = 
mocker.MagicMock(side_effect=create_get_option({ + 'verbose_output': True, + 'connection_type': 'docker-api', + 'add_legacy_groups': False, + 'compose': {}, + 'groups': {}, + 'keyed_groups': {}, + })) + inventory._populate(client) + + host_1 = inventory.inventory.get_host('loving_tharp') + host_1_vars = host_1.get_vars() + + assert host_1_vars['ansible_host'] == 'loving_tharp' + assert host_1_vars['ansible_connection'] == 'community.docker.docker_api' + assert 'ansible_ssh_host' not in host_1_vars + assert 'ansible_ssh_port' not in host_1_vars + assert 'docker_state' in host_1_vars + assert 'docker_config' in host_1_vars + assert 'docker_image' in host_1_vars + + assert len(inventory.inventory.groups['ungrouped'].hosts) == 0 + assert len(inventory.inventory.groups['all'].hosts) == 0 + assert len(inventory.inventory.groups) == 2 + assert len(inventory.inventory.hosts) == 1 + + +def test_populate_service(inventory, mocker): + client = FakeClient(LOVING_THARP_SERVICE) + + inventory.get_option = mocker.MagicMock(side_effect=create_get_option({ + 'verbose_output': False, + 'connection_type': 'docker-cli', + 'add_legacy_groups': True, + 'compose': {}, + 'groups': {}, + 'keyed_groups': {}, + 'docker_host': 'unix://var/run/docker.sock', + })) + inventory._populate(client) + + host_1 = inventory.inventory.get_host('loving_tharp') + host_1_vars = host_1.get_vars() + + assert host_1_vars['ansible_host'] == 'loving_tharp' + assert host_1_vars['ansible_connection'] == 'community.docker.docker' + assert 'ansible_ssh_host' not in host_1_vars + assert 'ansible_ssh_port' not in host_1_vars + assert 'docker_state' not in host_1_vars + assert 'docker_config' not in host_1_vars + assert 'docker_image' not in host_1_vars + + assert len(inventory.inventory.groups['ungrouped'].hosts) == 0 + assert len(inventory.inventory.groups['all'].hosts) == 0 + assert len(inventory.inventory.groups['7bd547963679e'].hosts) == 1 + assert len(inventory.inventory.groups['7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a'].hosts) == 1 + assert len(inventory.inventory.groups['image_quay.io/ansible/ubuntu1804-test-container:1.21.0'].hosts) == 1 + assert len(inventory.inventory.groups['loving_tharp'].hosts) == 1 + assert len(inventory.inventory.groups['running'].hosts) == 1 + assert len(inventory.inventory.groups['stopped'].hosts) == 0 + assert len(inventory.inventory.groups['service_my_service'].hosts) == 1 + assert len(inventory.inventory.groups['unix://var/run/docker.sock'].hosts) == 1 + assert len(inventory.inventory.groups) == 10 + assert len(inventory.inventory.hosts) == 1 + + +def test_populate_stack(inventory, mocker): + client = FakeClient(LOVING_THARP_STACK) + + inventory.get_option = mocker.MagicMock(side_effect=create_get_option({ + 'verbose_output': False, + 'connection_type': 'ssh', + 'add_legacy_groups': True, + 'compose': {}, + 'groups': {}, + 'keyed_groups': {}, + 'docker_host': 'unix://var/run/docker.sock', + 'default_ip': '127.0.0.1', + 'private_ssh_port': 22, + })) + inventory._populate(client) + + host_1 = inventory.inventory.get_host('loving_tharp') + host_1_vars = host_1.get_vars() + + assert host_1_vars['ansible_ssh_host'] == '127.0.0.1' + assert host_1_vars['ansible_ssh_port'] == '32802' + assert 'ansible_host' not in host_1_vars + assert 'ansible_connection' not in host_1_vars + assert 'docker_state' not in host_1_vars + assert 'docker_config' not in host_1_vars + assert 'docker_image' not in host_1_vars + + assert len(inventory.inventory.groups['ungrouped'].hosts) == 0 + assert 
len(inventory.inventory.groups['all'].hosts) == 0 + assert len(inventory.inventory.groups['7bd547963679e'].hosts) == 1 + assert len(inventory.inventory.groups['7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a'].hosts) == 1 + assert len(inventory.inventory.groups['image_quay.io/ansible/ubuntu1804-test-container:1.21.0'].hosts) == 1 + assert len(inventory.inventory.groups['loving_tharp'].hosts) == 1 + assert len(inventory.inventory.groups['running'].hosts) == 1 + assert len(inventory.inventory.groups['stopped'].hosts) == 0 + assert len(inventory.inventory.groups['stack_my_stack'].hosts) == 1 + assert len(inventory.inventory.groups['unix://var/run/docker.sock'].hosts) == 1 + assert len(inventory.inventory.groups) == 10 + assert len(inventory.inventory.hosts) == 1 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/module_utils/__init__.py diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_common.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_common.py new file mode 100644 index 00000000..b9f747db --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_common.py @@ -0,0 +1,518 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.community.docker.plugins.module_utils.common import ( + compare_dict_allow_more_present, + compare_generic, + convert_duration_to_nanosecond, + parse_healthcheck +) + +DICT_ALLOW_MORE_PRESENT = ( + { + 'av': {}, + 'bv': {'a': 1}, + 'result': True + }, + { + 'av': {'a': 1}, + 'bv': {'a': 1, 'b': 2}, + 'result': True + }, + { + 'av': {'a': 1}, + 'bv': {'b': 2}, + 'result': False + }, + { + 'av': {'a': 1}, + 'bv': {'a': None, 'b': 1}, + 'result': False + }, + { + 'av': {'a': None}, + 'bv': {'b': 1}, + 'result': False + }, +) + +COMPARE_GENERIC = [ + ######################################################################################## + # value + { + 'a': 1, + 'b': 2, + 'method': 'strict', + 'type': 'value', + 'result': False + }, + { + 'a': 'hello', + 'b': 'hello', + 'method': 'strict', + 'type': 'value', + 'result': True + }, + { + 'a': None, + 'b': 'hello', + 'method': 'strict', + 'type': 'value', + 'result': False + }, + { + 'a': None, + 'b': None, + 'method': 'strict', + 'type': 'value', + 'result': True + }, + { + 'a': 1, + 'b': 2, + 'method': 'ignore', + 'type': 'value', + 'result': True + }, + { + 'a': None, + 'b': 2, + 'method': 'ignore', + 'type': 'value', + 'result': True + }, + ######################################################################################## + # list + { + 'a': [ + 'x', + ], + 'b': [ + 'y', + ], + 'method': 'strict', + 'type': 'list', + 'result': False + }, + { + 'a': [ + 'x', + ], + 'b': [ + 'x', + 'x', + ], + 'method': 'strict', + 'type': 'list', + 'result': False + }, + { + 'a': [ + 'x', + 'y', + ], + 'b': [ + 'x', + 'y', + ], + 'method': 'strict', + 'type': 'list', + 'result': True + }, + { + 'a': [ + 'x', + 'y', + ], + 'b': [ + 'y', + 'x', + ], + 'method': 'strict', + 'type': 'list', + 'result': False + }, + { + 'a': [ + 'x', + 
'y', + ], + 'b': [ + 'x', + ], + 'method': 'allow_more_present', + 'type': 'list', + 'result': False + }, + { + 'a': [ + 'x', + ], + 'b': [ + 'x', + 'y', + ], + 'method': 'allow_more_present', + 'type': 'list', + 'result': True + }, + { + 'a': [ + 'x', + 'x', + 'y', + ], + 'b': [ + 'x', + 'y', + ], + 'method': 'allow_more_present', + 'type': 'list', + 'result': False + }, + { + 'a': [ + 'x', + 'z', + ], + 'b': [ + 'x', + 'y', + 'x', + 'z', + ], + 'method': 'allow_more_present', + 'type': 'list', + 'result': True + }, + { + 'a': [ + 'x', + 'y', + ], + 'b': [ + 'y', + 'x', + ], + 'method': 'ignore', + 'type': 'list', + 'result': True + }, + ######################################################################################## + # set + { + 'a': [ + 'x', + ], + 'b': [ + 'y', + ], + 'method': 'strict', + 'type': 'set', + 'result': False + }, + { + 'a': [ + 'x', + ], + 'b': [ + 'x', + 'x', + ], + 'method': 'strict', + 'type': 'set', + 'result': True + }, + { + 'a': [ + 'x', + 'y', + ], + 'b': [ + 'x', + 'y', + ], + 'method': 'strict', + 'type': 'set', + 'result': True + }, + { + 'a': [ + 'x', + 'y', + ], + 'b': [ + 'y', + 'x', + ], + 'method': 'strict', + 'type': 'set', + 'result': True + }, + { + 'a': [ + 'x', + 'y', + ], + 'b': [ + 'x', + ], + 'method': 'allow_more_present', + 'type': 'set', + 'result': False + }, + { + 'a': [ + 'x', + ], + 'b': [ + 'x', + 'y', + ], + 'method': 'allow_more_present', + 'type': 'set', + 'result': True + }, + { + 'a': [ + 'x', + 'x', + 'y', + ], + 'b': [ + 'x', + 'y', + ], + 'method': 'allow_more_present', + 'type': 'set', + 'result': True + }, + { + 'a': [ + 'x', + 'z', + ], + 'b': [ + 'x', + 'y', + 'x', + 'z', + ], + 'method': 'allow_more_present', + 'type': 'set', + 'result': True + }, + { + 'a': [ + 'x', + 'a', + ], + 'b': [ + 'y', + 'z', + ], + 'method': 'ignore', + 'type': 'set', + 'result': True + }, + ######################################################################################## + # set(dict) + { + 'a': [ + {'x': 1}, + ], + 'b': [ + {'y': 1}, + ], + 'method': 'strict', + 'type': 'set(dict)', + 'result': False + }, + { + 'a': [ + {'x': 1}, + ], + 'b': [ + {'x': 1}, + ], + 'method': 'strict', + 'type': 'set(dict)', + 'result': True + }, + { + 'a': [ + {'x': 1}, + ], + 'b': [ + {'x': 1, 'y': 2}, + ], + 'method': 'strict', + 'type': 'set(dict)', + 'result': True + }, + { + 'a': [ + {'x': 1}, + {'x': 2, 'y': 3}, + ], + 'b': [ + {'x': 1}, + {'x': 2, 'y': 3}, + ], + 'method': 'strict', + 'type': 'set(dict)', + 'result': True + }, + { + 'a': [ + {'x': 1}, + ], + 'b': [ + {'x': 1, 'z': 2}, + {'x': 2, 'y': 3}, + ], + 'method': 'allow_more_present', + 'type': 'set(dict)', + 'result': True + }, + { + 'a': [ + {'x': 1, 'y': 2}, + ], + 'b': [ + {'x': 1}, + {'x': 2, 'y': 3}, + ], + 'method': 'allow_more_present', + 'type': 'set(dict)', + 'result': False + }, + { + 'a': [ + {'x': 1, 'y': 3}, + ], + 'b': [ + {'x': 1}, + {'x': 1, 'y': 3, 'z': 4}, + ], + 'method': 'allow_more_present', + 'type': 'set(dict)', + 'result': True + }, + { + 'a': [ + {'x': 1}, + {'x': 2, 'y': 3}, + ], + 'b': [ + {'x': 1}, + ], + 'method': 'ignore', + 'type': 'set(dict)', + 'result': True + }, + ######################################################################################## + # dict + { + 'a': {'x': 1}, + 'b': {'y': 1}, + 'method': 'strict', + 'type': 'dict', + 'result': False + }, + { + 'a': {'x': 1}, + 'b': {'x': 1, 'y': 2}, + 'method': 'strict', + 'type': 'dict', + 'result': False + }, + { + 'a': {'x': 1}, + 'b': {'x': 1}, + 'method': 'strict', + 'type': 'dict', + 
'result': True + }, + { + 'a': {'x': 1, 'z': 2}, + 'b': {'x': 1, 'y': 2}, + 'method': 'strict', + 'type': 'dict', + 'result': False + }, + { + 'a': {'x': 1, 'z': 2}, + 'b': {'x': 1, 'y': 2}, + 'method': 'ignore', + 'type': 'dict', + 'result': True + }, +] + [{ + 'a': entry['av'], + 'b': entry['bv'], + 'method': 'allow_more_present', + 'type': 'dict', + 'result': entry['result'] +} for entry in DICT_ALLOW_MORE_PRESENT] + + +@pytest.mark.parametrize("entry", DICT_ALLOW_MORE_PRESENT) +def test_dict_allow_more_present(entry): + assert compare_dict_allow_more_present(entry['av'], entry['bv']) == entry['result'] + + +@pytest.mark.parametrize("entry", COMPARE_GENERIC) +def test_compare_generic(entry): + assert compare_generic(entry['a'], entry['b'], entry['method'], entry['type']) == entry['result'] + + +def test_convert_duration_to_nanosecond(): + nanoseconds = convert_duration_to_nanosecond('5s') + assert nanoseconds == 5000000000 + nanoseconds = convert_duration_to_nanosecond('1m5s') + assert nanoseconds == 65000000000 + with pytest.raises(ValueError): + convert_duration_to_nanosecond([1, 2, 3]) + with pytest.raises(ValueError): + convert_duration_to_nanosecond('10x') + + +def test_parse_healthcheck(): + result, disabled = parse_healthcheck({ + 'test': 'sleep 1', + 'interval': '1s', + }) + assert disabled is False + assert result == { + 'test': ['CMD-SHELL', 'sleep 1'], + 'interval': 1000000000 + } + + result, disabled = parse_healthcheck({ + 'test': ['NONE'], + }) + assert result is None + assert disabled + + result, disabled = parse_healthcheck({ + 'test': 'sleep 1', + 'interval': '1s423ms' + }) + assert result == { + 'test': ['CMD-SHELL', 'sleep 1'], + 'interval': 1423000000 + } + assert disabled is False + + result, disabled = parse_healthcheck({ + 'test': 'sleep 1', + 'interval': '1h1m2s3ms4us' + }) + assert result == { + 'test': ['CMD-SHELL', 'sleep 1'], + 'interval': 3662003004000 + } + assert disabled is False diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/__init__.py diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py new file mode 100644 index 00000000..a7d1e047 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py @@ -0,0 +1,31 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +import pytest + +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common._collections_compat import MutableMapping + + +@pytest.fixture +def patch_ansible_module(request, mocker): + if isinstance(request.param, string_types): + args = request.param + elif isinstance(request.param, MutableMapping): + if 'ANSIBLE_MODULE_ARGS' not in request.param: + request.param = {'ANSIBLE_MODULE_ARGS': request.param} + if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: + 
request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: + request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + args = json.dumps(request.param) + else: + raise Exception('Malformed data to the patch_ansible_module pytest fixture') + + mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_container.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_container.py new file mode 100644 index 00000000..00701961 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_container.py @@ -0,0 +1,22 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import unittest + +from ansible_collections.community.docker.plugins.modules.docker_container import TaskParameters + + +class TestTaskParameters(unittest.TestCase): + """Unit tests for TaskParameters.""" + + def test_parse_exposed_ports_tcp_udp(self): + """ + Ensure _parse_exposed_ports does not cancel ports with the same + number but different protocol. + """ + task_params = TaskParameters.__new__(TaskParameters) + task_params.exposed_ports = None + result = task_params._parse_exposed_ports([80, '443', '443/udp']) + self.assertTrue((80, 'tcp') in result) + self.assertTrue((443, 'tcp') in result) + self.assertTrue((443, 'udp') in result) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py new file mode 100644 index 00000000..4b36a52c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py @@ -0,0 +1,31 @@ +"""Unit tests for docker_network.""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.community.docker.plugins.modules.docker_network import validate_cidr + + +@pytest.mark.parametrize("cidr,expected", [ + ('192.168.0.1/16', 'ipv4'), + ('192.168.0.1/24', 'ipv4'), + ('192.168.0.1/32', 'ipv4'), + ('fdd1:ac8c:0557:7ce2::/64', 'ipv6'), + ('fdd1:ac8c:0557:7ce2::/128', 'ipv6'), +]) +def test_validate_cidr_positives(cidr, expected): + assert validate_cidr(cidr) == expected + + +@pytest.mark.parametrize("cidr", [ + '192.168.0.1', + '192.168.0.1/34', + '192.168.0.1/asd', + 'fdd1:ac8c:0557:7ce2::', +]) +def test_validate_cidr_negatives(cidr): + with pytest.raises(ValueError) as e: + validate_cidr(cidr) + assert '"{0}" is not a valid CIDR'.format(cidr) == str(e.value) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py new file mode 100644 index 00000000..1fae8daf --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py @@ -0,0 +1,510 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + + +class APIErrorMock(Exception): + def __init__(self, message, response=None, 
explanation=None): + self.message = message + self.response = response + self.explanation = explanation + + +@pytest.fixture(autouse=True) +def docker_module_mock(mocker): + docker_module_mock = mocker.MagicMock() + docker_utils_module_mock = mocker.MagicMock() + docker_errors_module_mock = mocker.MagicMock() + docker_errors_module_mock.APIError = APIErrorMock + mock_modules = { + 'docker': docker_module_mock, + 'docker.utils': docker_utils_module_mock, + 'docker.errors': docker_errors_module_mock, + } + return mocker.patch.dict('sys.modules', **mock_modules) + + +@pytest.fixture(autouse=True) +def docker_swarm_service(): + from ansible_collections.community.docker.plugins.modules import docker_swarm_service + + return docker_swarm_service + + +def test_retry_on_out_of_sequence_error(mocker, docker_swarm_service): + run_mock = mocker.MagicMock( + side_effect=APIErrorMock( + message='', + response=None, + explanation='rpc error: code = Unknown desc = update out of sequence', + ) + ) + manager = docker_swarm_service.DockerServiceManager(client=None) + manager.run = run_mock + with pytest.raises(APIErrorMock): + manager.run_safe() + assert run_mock.call_count == 3 + + +def test_no_retry_on_general_api_error(mocker, docker_swarm_service): + run_mock = mocker.MagicMock( + side_effect=APIErrorMock(message='', response=None, explanation='some error') + ) + manager = docker_swarm_service.DockerServiceManager(client=None) + manager.run = run_mock + with pytest.raises(APIErrorMock): + manager.run_safe() + assert run_mock.call_count == 1 + + +def test_get_docker_environment(mocker, docker_swarm_service): + env_file_result = {'TEST1': 'A', 'TEST2': 'B', 'TEST3': 'C'} + env_dict = {'TEST3': 'CC', 'TEST4': 'D'} + env_string = "TEST3=CC,TEST4=D" + + env_list = ['TEST3=CC', 'TEST4=D'] + expected_result = sorted(['TEST1=A', 'TEST2=B', 'TEST3=CC', 'TEST4=D']) + mocker.patch.object( + docker_swarm_service, 'parse_env_file', return_value=env_file_result + ) + mocker.patch.object( + docker_swarm_service, + 'format_environment', + side_effect=lambda d: ['{0}={1}'.format(key, value) for key, value in d.items()], + ) + # Test with env dict and file + result = docker_swarm_service.get_docker_environment( + env_dict, env_files=['dummypath'] + ) + assert result == expected_result + # Test with env list and file + result = docker_swarm_service.get_docker_environment( + env_list, + env_files=['dummypath'] + ) + assert result == expected_result + # Test with env string and file + result = docker_swarm_service.get_docker_environment( + env_string, env_files=['dummypath'] + ) + assert result == expected_result + # Test with empty env + result = docker_swarm_service.get_docker_environment( + [], env_files=None + ) + assert result == [] + # Test with empty env_files + result = docker_swarm_service.get_docker_environment( + None, env_files=[] + ) + assert result == [] + + +def test_get_nanoseconds_from_raw_option(docker_swarm_service): + value = docker_swarm_service.get_nanoseconds_from_raw_option('test', None) + assert value is None + + value = docker_swarm_service.get_nanoseconds_from_raw_option('test', '1m30s535ms') + assert value == 90535000000 + + value = docker_swarm_service.get_nanoseconds_from_raw_option('test', 10000000000) + assert value == 10000000000 + + with pytest.raises(ValueError): + docker_swarm_service.get_nanoseconds_from_raw_option('test', []) + + +def test_has_dict_changed(docker_swarm_service): + assert not docker_swarm_service.has_dict_changed( + {"a": 1}, + {"a":
1}, + ) + assert not docker_swarm_service.has_dict_changed( + {"a": 1}, + {"a": 1, "b": 2} + ) + assert docker_swarm_service.has_dict_changed( + {"a": 1}, + {"a": 2, "b": 2} + ) + assert docker_swarm_service.has_dict_changed( + {"a": 1, "b": 1}, + {"a": 1} + ) + assert not docker_swarm_service.has_dict_changed( + None, + {"a": 2, "b": 2} + ) + assert docker_swarm_service.has_dict_changed( + {}, + {"a": 2, "b": 2} + ) + assert docker_swarm_service.has_dict_changed( + {"a": 1}, + {} + ) + assert docker_swarm_service.has_dict_changed( + {"a": 1}, + None + ) + assert not docker_swarm_service.has_dict_changed( + {}, + {} + ) + assert not docker_swarm_service.has_dict_changed( + None, + None + ) + assert not docker_swarm_service.has_dict_changed( + {}, + None + ) + assert not docker_swarm_service.has_dict_changed( + None, + {} + ) + + +def test_has_list_changed(docker_swarm_service): + + # List comparisons without dictionaries + # I could improve the indenting, but pycodestyle wants this instead + assert not docker_swarm_service.has_list_changed(None, None) + assert not docker_swarm_service.has_list_changed(None, []) + assert not docker_swarm_service.has_list_changed(None, [1, 2]) + + assert not docker_swarm_service.has_list_changed([], None) + assert not docker_swarm_service.has_list_changed([], []) + assert docker_swarm_service.has_list_changed([], [1, 2]) + + assert docker_swarm_service.has_list_changed([1, 2], None) + assert docker_swarm_service.has_list_changed([1, 2], []) + + assert docker_swarm_service.has_list_changed([1, 2, 3], [1, 2]) + assert docker_swarm_service.has_list_changed([1, 2], [1, 2, 3]) + + # Check list sorting + assert not docker_swarm_service.has_list_changed([1, 2], [2, 1]) + assert docker_swarm_service.has_list_changed( + [1, 2], + [2, 1], + sort_lists=False + ) + + # Check type matching + assert docker_swarm_service.has_list_changed([None, 1], [2, 1]) + assert docker_swarm_service.has_list_changed([2, 1], [None, 1]) + assert docker_swarm_service.has_list_changed( + "command --with args", + ['command', '--with', 'args'] + ) + assert docker_swarm_service.has_list_changed( + ['sleep', '3400'], + [u'sleep', u'3600'], + sort_lists=False + ) + + # List comparisons with dictionaries + assert not docker_swarm_service.has_list_changed( + [{'a': 1}], + [{'a': 1}], + sort_key='a' + ) + + assert not docker_swarm_service.has_list_changed( + [{'a': 1}, {'a': 2}], + [{'a': 1}, {'a': 2}], + sort_key='a' + ) + + with pytest.raises(Exception): + docker_swarm_service.has_list_changed( + [{'a': 1}, {'a': 2}], + [{'a': 1}, {'a': 2}] + ) + + # List sort checking with sort key + assert not docker_swarm_service.has_list_changed( + [{'a': 1}, {'a': 2}], + [{'a': 2}, {'a': 1}], + sort_key='a' + ) + assert docker_swarm_service.has_list_changed( + [{'a': 1}, {'a': 2}], + [{'a': 2}, {'a': 1}], + sort_lists=False + ) + + assert docker_swarm_service.has_list_changed( + [{'a': 1}, {'a': 2}, {'a': 3}], + [{'a': 2}, {'a': 1}], + sort_key='a' + ) + assert docker_swarm_service.has_list_changed( + [{'a': 1}, {'a': 2}], + [{'a': 1}, {'a': 2}, {'a': 3}], + sort_lists=False + ) + + # Additional dictionary elements + assert not docker_swarm_service.has_list_changed( + [ + {"src": 1, "dst": 2}, + {"src": 1, "dst": 2, "protocol": "udp"}, + ], + [ + {"src": 1, "dst": 2, "protocol": "tcp"}, + {"src": 1, "dst": 2, "protocol": "udp"}, + ], + sort_key='dst' + ) + assert not docker_swarm_service.has_list_changed( + [ + {"src": 1, "dst": 2, "protocol": "udp"}, + {"src": 1, "dst": 3, "protocol": "tcp"}, + ], + [ + 
{"src": 1, "dst": 2, "protocol": "udp"}, + {"src": 1, "dst": 3, "protocol": "tcp"}, + ], + sort_key='dst' + ) + assert docker_swarm_service.has_list_changed( + [ + {"src": 1, "dst": 2, "protocol": "udp"}, + {"src": 1, "dst": 2}, + {"src": 3, "dst": 4}, + ], + [ + {"src": 1, "dst": 3, "protocol": "udp"}, + {"src": 1, "dst": 2, "protocol": "tcp"}, + {"src": 3, "dst": 4, "protocol": "tcp"}, + ], + sort_key='dst' + ) + assert docker_swarm_service.has_list_changed( + [ + {"src": 1, "dst": 3, "protocol": "tcp"}, + {"src": 1, "dst": 2, "protocol": "udp"}, + ], + [ + {"src": 1, "dst": 2, "protocol": "tcp"}, + {"src": 1, "dst": 2, "protocol": "udp"}, + ], + sort_key='dst' + ) + assert docker_swarm_service.has_list_changed( + [ + {"src": 1, "dst": 2, "protocol": "udp"}, + {"src": 1, "dst": 2, "protocol": "tcp", "extra": {"test": "foo"}}, + ], + [ + {"src": 1, "dst": 2, "protocol": "udp"}, + {"src": 1, "dst": 2, "protocol": "tcp"}, + ], + sort_key='dst' + ) + assert not docker_swarm_service.has_list_changed( + [{'id': '123', 'aliases': []}], + [{'id': '123'}], + sort_key='id' + ) + + +def test_have_networks_changed(docker_swarm_service): + assert not docker_swarm_service.have_networks_changed( + None, + None + ) + + assert not docker_swarm_service.have_networks_changed( + [], + None + ) + + assert not docker_swarm_service.have_networks_changed( + [{'id': 1}], + [{'id': 1}] + ) + + assert docker_swarm_service.have_networks_changed( + [{'id': 1}], + [{'id': 1}, {'id': 2}] + ) + + assert not docker_swarm_service.have_networks_changed( + [{'id': 1}, {'id': 2}], + [{'id': 1}, {'id': 2}] + ) + + assert not docker_swarm_service.have_networks_changed( + [{'id': 1}, {'id': 2}], + [{'id': 2}, {'id': 1}] + ) + + assert not docker_swarm_service.have_networks_changed( + [ + {'id': 1}, + {'id': 2, 'aliases': []} + ], + [ + {'id': 1}, + {'id': 2} + ] + ) + + assert docker_swarm_service.have_networks_changed( + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias1']} + ], + [ + {'id': 1}, + {'id': 2} + ] + ) + + assert docker_swarm_service.have_networks_changed( + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias1', 'alias2']} + ], + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias1']} + ] + ) + + assert not docker_swarm_service.have_networks_changed( + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias1', 'alias2']} + ], + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias1', 'alias2']} + ] + ) + + assert not docker_swarm_service.have_networks_changed( + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias1', 'alias2']} + ], + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias2', 'alias1']} + ] + ) + + assert not docker_swarm_service.have_networks_changed( + [ + {'id': 1, 'options': {}}, + {'id': 2, 'aliases': ['alias1', 'alias2']}], + [ + {'id': 1}, + {'id': 2, 'aliases': ['alias2', 'alias1']} + ] + ) + + assert not docker_swarm_service.have_networks_changed( + [ + {'id': 1, 'options': {'option1': 'value1'}}, + {'id': 2, 'aliases': ['alias1', 'alias2']}], + [ + {'id': 1, 'options': {'option1': 'value1'}}, + {'id': 2, 'aliases': ['alias2', 'alias1']} + ] + ) + + assert docker_swarm_service.have_networks_changed( + [ + {'id': 1, 'options': {'option1': 'value1'}}, + {'id': 2, 'aliases': ['alias1', 'alias2']}], + [ + {'id': 1, 'options': {'option1': 'value2'}}, + {'id': 2, 'aliases': ['alias2', 'alias1']} + ] + ) + + +def test_get_docker_networks(docker_swarm_service): + network_names = [ + 'network_1', + 'network_2', + 'network_3', + 'network_4', + ] + networks = [ + network_names[0], + {'name': network_names[1]}, + {'name': network_names[2], 
'aliases': ['networkalias1']}, + {'name': network_names[3], 'aliases': ['networkalias2'], 'options': {'foo': 'bar'}}, + ] + network_ids = { + network_names[0]: '1', + network_names[1]: '2', + network_names[2]: '3', + network_names[3]: '4', + } + parsed_networks = docker_swarm_service.get_docker_networks( + networks, + network_ids + ) + assert len(parsed_networks) == 4 + for i, network in enumerate(parsed_networks): + assert 'name' not in network + assert 'id' in network + expected_name = network_names[i] + assert network['id'] == network_ids[expected_name] + if i == 2: + assert network['aliases'] == ['networkalias1'] + if i == 3: + assert network['aliases'] == ['networkalias2'] + if i == 3: + assert 'foo' in network['options'] + # Test missing name + with pytest.raises(TypeError): + docker_swarm_service.get_docker_networks([{'invalid': 'err'}], {'err': 1}) + # test for invalid aliases type + with pytest.raises(TypeError): + docker_swarm_service.get_docker_networks( + [{'name': 'test', 'aliases': 1}], + {'test': 1} + ) + # Test invalid aliases elements + with pytest.raises(TypeError): + docker_swarm_service.get_docker_networks( + [{'name': 'test', 'aliases': [1]}], + {'test': 1} + ) + # Test for invalid options type + with pytest.raises(TypeError): + docker_swarm_service.get_docker_networks( + [{'name': 'test', 'options': 1}], + {'test': 1} + ) + # Test for invalid networks type + with pytest.raises(TypeError): + docker_swarm_service.get_docker_networks( + 1, + {'test': 1} + ) + # Test for non existing networks + with pytest.raises(ValueError): + docker_swarm_service.get_docker_networks( + [{'name': 'idontexist'}], + {'test': 1} + ) + # Test empty values + assert docker_swarm_service.get_docker_networks([], {}) == [] + assert docker_swarm_service.get_docker_networks(None, {}) is None + # Test invalid options + with pytest.raises(TypeError): + docker_swarm_service.get_docker_networks( + [{'name': 'test', 'nonexisting_option': 'foo'}], + {'test': '1'} + ) diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_volume.py b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_volume.py new file mode 100644 index 00000000..6bce3f38 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_volume.py @@ -0,0 +1,36 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +import pytest + +from ansible_collections.community.docker.plugins.modules import docker_volume +from ansible_collections.community.docker.plugins.module_utils import common + +pytestmark = pytest.mark.usefixtures('patch_ansible_module') + +TESTCASE_DOCKER_VOLUME = [ + { + 'name': 'daemon_config', + 'state': 'present' + } +] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DOCKER_VOLUME, indirect=['patch_ansible_module']) +def test_create_volume_on_invalid_docker_version(mocker, capfd): + mocker.patch.object(common, 'HAS_DOCKER_PY', True) + mocker.patch.object(common, 'docker_version', '1.8.0') + + with pytest.raises(SystemExit): + docker_volume.main() + + out, dummy = capfd.readouterr() + results = json.loads(out) + assert results['failed'] + assert 'Error: Docker SDK for Python version is 1.8.0 ' in results['msg'] + assert 'Minimum version required is 1.10.0.' 
in results['msg'] diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/community/docker/tests/unit/requirements.txt new file mode 100644 index 00000000..89f3f5a6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/unit/requirements.txt @@ -0,0 +1,2 @@ +unittest2 ; python_version < '2.7' +importlib ; python_version < '2.7' diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/constraints.txt b/collections-debian-merged/ansible_collections/community/docker/tests/utils/constraints.txt new file mode 100644 index 00000000..ae6000ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/constraints.txt @@ -0,0 +1,52 @@ +coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible +coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible +cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6 +deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3 +jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later +urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later +pywinrm >= 0.3.0 # message encryption support +sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later +sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3 +pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers +wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later +yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+ +pycrypto >= 2.6 # Need features found in 2.6 and greater +ncclient >= 0.5.2 # Need features added in 0.5.2 and greater +idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead +paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6 +pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6 +pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7 +pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later +pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+) +ntlm-auth >= 1.3.0 # message encryption support using cryptography +requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6 +requests-ntlm >= 1.1.0 # message encryption support +requests-credssp >= 0.1.0 # message encryption support +voluptuous >= 0.11.0 # Schema recursion via Self +openshift >= 0.6.2, < 0.9.0 # merge_type support +virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later +pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later +pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later +pyfmg == 0.6.1 # newer versions do 
not pass current unit tests +pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later +pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later +mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...) +pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option +xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later +lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later +pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later +pyone == 1.1.9 # newer versions do not pass current integration tests +boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support +botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support +botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca +setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later +cffi >= 1.14.2, != 1.14.3 # Yanked version which older versions of pip will still install: + +# freeze pylint and its requirements for consistent test results +astroid == 2.2.5 +isort == 4.3.15 +lazy-object-proxy == 1.3.1 +mccabe == 0.6.1 +pylint == 2.3.1 +typed-ast == 1.4.0 # 1.4.0 is required to compile on Python 3.8 +wrapt == 1.11.1 diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/check_matrix.py b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/check_matrix.py new file mode 100755 index 00000000..ab8d66c0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/check_matrix.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import json +import os +import re +import sys +import time + +try: + from typing import NoReturn +except ImportError: + NoReturn = None + +try: + # noinspection PyCompatibility + from urllib2 import urlopen # pylint: disable=ansible-bad-import-from +except ImportError: + # noinspection PyCompatibility + from urllib.request import urlopen + + +def main(): # type: () -> None + """Main entry point.""" + repo_full_name = os.environ['REPO_FULL_NAME'] + required_repo_full_name = 'ansible-collections/community.docker' + + if repo_full_name != required_repo_full_name: + sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name)) + return + + with open('shippable.yml', 'rb') as yaml_file: + yaml = yaml_file.read().decode('utf-8').splitlines() + + defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none'] + + if not defined_matrix: + fail('No matrix entries found in the "shippable.yml" file.', + 'Did you modify the "shippable.yml" file?') + + run_id = os.environ['SHIPPABLE_BUILD_ID'] + sleep = 1 + jobs = [] + + for attempts_remaining in range(4, -1, -1): + try: + jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read()) + + if not 
isinstance(jobs, list): + raise Exception('Shippable run %s data is not a list.' % run_id) + + break + except Exception as ex: + if not attempts_remaining: + fail('Unable to retrieve Shippable run %s matrix.' % run_id, + str(ex)) + + sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex)) + sys.stderr.write('Trying again in %d seconds...\n' % sleep) + time.sleep(sleep) + sleep *= 2 + + if len(jobs) != len(defined_matrix): + if len(jobs) == 1: + hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.' + else: + hint = '' + + fail('Shippable run %s has %d jobs instead of the expected %d jobs.' % (run_id, len(jobs), len(defined_matrix)), + 'Try re-running the entire matrix.%s' % hint) + + actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs) + errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test] + + if len(errors): + error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors) + + fail('Shippable run %s has a job matrix mismatch.' % run_id, + 'Try re-running the entire matrix.\n\n%s' % error_summary) + + +def fail(message, output): # type: (str, str) -> NoReturn + # Include a leading newline to improve readability on Shippable "Tests" tab. + # Without this, the first line becomes indented. + output = '\n' + output.strip() + + timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + + # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers + xml = ''' +<?xml version="1.0" encoding="utf-8"?> +<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0"> +\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None"> +\t\t<testcase classname="timeout" name="timeout"> +\t\t\t<error message="%s" type="error">%s</error> +\t\t</testcase> +\t</testsuite> +</testsuites> +''' % (timestamp, message, output) + + path = 'shippable/testresults/check-matrix.xml' + dir_path = os.path.dirname(path) + + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(path, 'w') as junit_fd: + junit_fd.write(xml.lstrip()) + + sys.stderr.write(message + '\n') + sys.stderr.write(output + '\n') + + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/cloud.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/cloud.sh new file mode 100755 index 00000000..d76c3228 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/cloud.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +cloud="${args[0]}" +python="${args[1]}" +group="${args[2]}" + +target="shippable/${cloud}/group${group}/" + +stage="${S:-prod}" + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote-terminate always --remote-stage "${stage}" \ + --docker --python "${python}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/linux.sh 
b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/linux.sh new file mode 100755 index 00000000..9cc2f966 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/linux.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +image="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --docker "${image}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/remote.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/remote.sh new file mode 100755 index 00000000..cd3014cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/remote.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +platform="${args[0]}" +version="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +stage="${S:-prod}" +provider="${P:-default}" + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/rhel.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/rhel.sh new file mode 100755 index 00000000..cd3014cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/rhel.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +platform="${args[0]}" +version="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +stage="${S:-prod}" +provider="${P:-default}" + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/sanity.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/sanity.sh new file mode 100755 index 00000000..dee0626c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/sanity.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +group="${args[1]}" + +if [ "${BASE_BRANCH:-}" ]; then + base_branch="origin/${BASE_BRANCH}" +else + base_branch="" +fi + +if [ "${group}" == "extra" ]; then + ../internal_test_tools/tools/run.py --color + exit +fi + +# shellcheck disable=SC2086 +ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \ + --docker --base-branch "${base_branch}" \ + --allow-disabled diff 
--git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/shippable.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/shippable.sh new file mode 100755 index 00000000..e9318cdd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/shippable.sh @@ -0,0 +1,219 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +ansible_version="${args[0]}" +script="${args[1]}" + +function join { + local IFS="$1"; + shift; + echo "$*"; +} + +# Ensure we can write other collections to this dir +sudo chown "$(whoami)" "${PWD}/../../" + +test="$(join / "${args[@]:1}")" + +docker images ansible/ansible +docker images quay.io/ansible/* +docker ps + +for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v -e '^drydock/' -e '^quay.io/ansible/azure-pipelines-test-container:' | sed 's/^.* //'); do + docker rm -f "${container}" || true # ignore errors +done + +docker ps + +if [ -d /home/shippable/cache/ ]; then + ls -la /home/shippable/cache/ +fi + +command -v python +python -V + +function retry +{ + # shellcheck disable=SC2034 + for repetition in 1 2 3; do + set +e + "$@" + result=$? + set -e + if [ ${result} == 0 ]; then + return ${result} + fi + echo "$* -> ${result}" + done + echo "Command '$*' failed 3 times!" + exit 1 +} + +command -v pip +pip --version +pip list --disable-pip-version-check +if [ "${ansible_version}" == "devel" ]; then + retry pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check +else + retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check +fi + +if [ "${SHIPPABLE_BUILD_ID:-}" ]; then + export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible" + SHIPPABLE_RESULT_DIR="$(pwd)/shippable" + TEST_DIR="${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/docker" + mkdir -p "${TEST_DIR}" + cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}" + cd "${TEST_DIR}" +else + export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../" +fi + +if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then + # Nothing further should be added to this list. + # This is to prevent modules or plugins in this collection having a runtime dependency on other collections.
+    retry ansible-galaxy -vvv collection install community.internal_test_tools
+fi
+
+if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then
+    # To prevent Python dependencies on other collections, only install other collections for integration tests
+    retry ansible-galaxy -vvv collection install ansible.posix
+    retry ansible-galaxy -vvv collection install community.crypto
+    retry ansible-galaxy -vvv collection install community.general
+fi
+
+# END: HACK
+
+export PYTHONIOENCODING='utf-8'
+
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then
+    COVERAGE=yes
+    COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+    # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+    export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then
+    # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+    export COVERAGE="--coverage"
+else
+    # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+    export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+    # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+    export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+    # disable change detection triggered by having 'ci_complete' in the latest commit message
+    export CHANGED=""
+else
+    # enable change detection (default behavior)
+    export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+    # run unstable tests which are targeted by focused changes on PRs
+    export UNSTABLE="--allow-unstable-changed"
+else
+    # do not run unstable tests outside PRs
+    export UNSTABLE=""
+fi
+
+# remove empty core/extras module directories from PRs created prior to the repo-merge
+find plugins -type d -empty -print -delete
+
+function cleanup
+{
+    # for complete on-demand coverage generate a report for all files with no coverage on the "sanity/5" job so we only have one copy
+    if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/5" ]; then
+        stub="--stub"
+        # trigger coverage reporting for stubs even if no other coverage data exists
+        mkdir -p tests/output/coverage/
+    else
+        stub=""
+    fi
+
+    if [ -d tests/output/coverage/ ]; then
+        if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
+            process_coverage='yes'  # process existing coverage files
+        elif [ "${stub}" ]; then
+            process_coverage='yes'  # process coverage when stubs are enabled
+        else
+            process_coverage=''
+        fi
+
+        if [ "${process_coverage}" ]; then
+            # use python 3.7 for coverage to avoid running out of memory during coverage xml processing
+            # only use it for coverage to avoid the additional overhead of setting up a virtual environment for a potential no-op job
+            virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+            set +ux
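+            # (Note: 'set +ux' above is deliberate; virtualenv activate scripts
+            # commonly reference unset variables and would abort under 'set -u'.
+            # Strict mode is restored with 'set -ux' right after activation.)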
+            . ~/ansible-venv/bin/activate
+            set -ux
+
+            # shellcheck disable=SC2086
+            ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
+            cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
+
+            if [ "${ansible_version}" != "2.9" ]; then
+                # analyze and capture code coverage aggregated by integration test target
+                ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
+            fi
+
+            # upload coverage report to codecov.io only when using complete on-demand coverage
+            if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
+                for file in tests/output/reports/coverage=*.xml; do
+                    flags="${file##*/coverage=}"
+                    flags="${flags%-powershell.xml}"
+                    flags="${flags%.xml}"
+                    # remove numbered component from stub files when converting to tags
+                    flags="${flags//stub-[0-9]*/stub}"
+                    flags="${flags//=/,}"
+                    flags="${flags//[^a-zA-Z0-9_,]/_}"
+
+                    bash <(curl -s https://codecov.io/bash) \
+                        -f "${file}" \
+                        -F "${flags}" \
+                        -n "${test}" \
+                        -t 8450ed26-4e94-4d07-8831-d2023d6d20a3 \
+                        -X coveragepy \
+                        -X gcov \
+                        -X fix \
+                        -X search \
+                        -X xcode \
+                        || echo "Failed to upload code coverage report to codecov.io: ${file}"
+                done
+            fi
+        fi
+    fi
+
+    if [ -d tests/output/junit/ ]; then
+        cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/"
+    fi
+
+    if [ -d tests/output/data/ ]; then
+        cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/"
+    fi
+
+    if [ -d tests/output/bot/ ]; then
+        cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/"
+    fi
+}
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then trap cleanup EXIT; fi
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+    timeout=60
+else
+    timeout=50
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi
+"tests/utils/shippable/${script}.sh" "${test}"
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/timing.py b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/timing.py
new file mode 100755
index 00000000..fb538271
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/timing.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3.7
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+start = time.time()
+
+sys.stdin.reconfigure(errors='surrogateescape')
+sys.stdout.reconfigure(errors='surrogateescape')
+
+for line in sys.stdin:
+    seconds = time.time() - start
+    sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+    sys.stdout.flush()
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/timing.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/timing.sh
new file mode 100755
index 00000000..77e25783
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/timing.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eu
+
+"$@" 2>&1 | "$(dirname "$0")/timing.py"
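timing.sh and timing.py above work as a pair: timing.sh runs an arbitrary command with stderr folded into stdout and pipes the stream through timing.py, which prefixes every line with the minutes and seconds elapsed since it started. An illustrative invocation (the output is what the format string produces, not a captured run):

    $ tests/utils/shippable/timing.sh echo hello
    00:00 hello

units.sh, next, guards an optional option array with the "${group1[@]:+${group1[@]}}" expansion. Under 'set -u', bash versions before 4.4 treat expanding an empty array as an unbound-variable error; the ':+' form expands to zero words instead. A self-contained sketch of the idiom:

    #!/usr/bin/env bash
    set -u
    group1=()                              # no extra options defined
    # Plain "${group1[@]}" would abort here with "unbound variable" on bash < 4.4;
    # the ':+' form safely expands to nothing.
    echo ok "${group1[@]:+${group1[@]}}"   # prints: ok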
diff --git a/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/units.sh b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/units.sh
new file mode 100755
index 00000000..f204dc87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/tests/utils/shippable/units.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+group="${args[1]}"
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+    timeout=90
+else
+    timeout=30
+fi
+
+group1=()
+
+case "${group}" in
+    1) options=("${group1[@]:+${group1[@]}}") ;;
+esac
+
+ansible-test env --timeout "${timeout}" --color -v
+
+# shellcheck disable=SC2086
+ansible-test units --color -v --docker default ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+    "${options[@]:+${options[@]}}" \