Diffstat (limited to 'lib/ansible/modules')
-rw-r--r--  lib/ansible/modules/__init__.py  0
-rw-r--r--  lib/ansible/modules/_include.py  80
-rw-r--r--  lib/ansible/modules/add_host.py  115
-rw-r--r--  lib/ansible/modules/apt.py  1487
-rw-r--r--  lib/ansible/modules/apt_key.py  530
-rw-r--r--  lib/ansible/modules/apt_repository.py  735
-rw-r--r--  lib/ansible/modules/assemble.py  280
-rw-r--r--  lib/ansible/modules/assert.py  105
-rw-r--r--  lib/ansible/modules/async_status.py  166
-rw-r--r--  lib/ansible/modules/async_wrapper.py  350
-rw-r--r--  lib/ansible/modules/blockinfile.py  387
-rw-r--r--  lib/ansible/modules/command.py  352
-rw-r--r--  lib/ansible/modules/copy.py  825
-rw-r--r--  lib/ansible/modules/cron.py  765
-rw-r--r--  lib/ansible/modules/debconf.py  231
-rw-r--r--  lib/ansible/modules/debug.py  99
-rw-r--r--  lib/ansible/modules/dnf.py  1468
-rw-r--r--  lib/ansible/modules/dpkg_selections.py  90
-rw-r--r--  lib/ansible/modules/expect.py  258
-rw-r--r--  lib/ansible/modules/fail.py  63
-rw-r--r--  lib/ansible/modules/fetch.py  124
-rw-r--r--  lib/ansible/modules/file.py  987
-rw-r--r--  lib/ansible/modules/find.py  534
-rw-r--r--  lib/ansible/modules/gather_facts.py  64
-rw-r--r--  lib/ansible/modules/get_url.py  706
-rw-r--r--  lib/ansible/modules/getent.py  200
-rw-r--r--  lib/ansible/modules/git.py  1418
-rw-r--r--  lib/ansible/modules/group.py  662
-rw-r--r--  lib/ansible/modules/group_by.py  89
-rw-r--r--  lib/ansible/modules/hostname.py  908
-rw-r--r--  lib/ansible/modules/import_playbook.py  77
-rw-r--r--  lib/ansible/modules/import_role.py  110
-rw-r--r--  lib/ansible/modules/import_tasks.py  77
-rw-r--r--  lib/ansible/modules/include_role.py  139
-rw-r--r--  lib/ansible/modules/include_tasks.py  99
-rw-r--r--  lib/ansible/modules/include_vars.py  196
-rw-r--r--  lib/ansible/modules/iptables.py  916
-rw-r--r--  lib/ansible/modules/known_hosts.py  365
-rw-r--r--  lib/ansible/modules/lineinfile.py  638
-rw-r--r--  lib/ansible/modules/meta.py  123
-rw-r--r--  lib/ansible/modules/package.py  87
-rw-r--r--  lib/ansible/modules/package_facts.py  552
-rw-r--r--  lib/ansible/modules/pause.py  117
-rw-r--r--  lib/ansible/modules/ping.py  89
-rw-r--r--  lib/ansible/modules/pip.py  832
-rw-r--r--  lib/ansible/modules/raw.py  88
-rw-r--r--  lib/ansible/modules/reboot.py  137
-rw-r--r--  lib/ansible/modules/replace.py  316
-rw-r--r--  lib/ansible/modules/rpm_key.py  253
-rw-r--r--  lib/ansible/modules/script.py  108
-rw-r--r--  lib/ansible/modules/service.py  1699
-rw-r--r--  lib/ansible/modules/service_facts.py  411
-rw-r--r--  lib/ansible/modules/set_fact.py  120
-rw-r--r--  lib/ansible/modules/set_stats.py  82
-rw-r--r--  lib/ansible/modules/setup.py  230
-rw-r--r--  lib/ansible/modules/shell.py  205
-rw-r--r--  lib/ansible/modules/slurp.py  123
-rw-r--r--  lib/ansible/modules/stat.py  560
-rw-r--r--  lib/ansible/modules/subversion.py  393
-rw-r--r--  lib/ansible/modules/systemd.py  569
-rw-r--r--  lib/ansible/modules/systemd_service.py  569
-rw-r--r--  lib/ansible/modules/sysvinit.py  364
-rw-r--r--  lib/ansible/modules/tempfile.py  124
-rw-r--r--  lib/ansible/modules/template.py  111
-rw-r--r--  lib/ansible/modules/unarchive.py  1115
-rw-r--r--  lib/ansible/modules/uri.py  779
-rw-r--r--  lib/ansible/modules/user.py  3253
-rw-r--r--  lib/ansible/modules/validate_argument_spec.py  118
-rw-r--r--  lib/ansible/modules/wait_for.py  689
-rw-r--r--  lib/ansible/modules/wait_for_connection.py  121
-rw-r--r--  lib/ansible/modules/yum.py  1818
-rw-r--r--  lib/ansible/modules/yum_repository.py  735
72 files changed, 33585 insertions, 0 deletions
diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/modules/__init__.py
diff --git a/lib/ansible/modules/_include.py b/lib/ansible/modules/_include.py
new file mode 100644
index 0000000..60deb94
--- /dev/null
+++ b/lib/ansible/modules/_include.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include
+short_description: Include a task list
+description:
+ - Includes a file with a list of tasks to be executed in the current playbook.
+ - Lists of tasks can only be included where tasks
+ normally run (in play).
+ - Before Ansible 2.0, all includes were 'static' and were executed when the play was compiled.
+ - Static includes are not subject to most directives. For example, loops or conditionals are applied instead to each
+ inherited task.
+ - Since Ansible 2.0, task includes are dynamic and behave more like real tasks. This means they can be looped,
+ skipped and use variables from any source. Ansible tries to auto-detect this, but you can use the C(static)
+ directive (which was added in Ansible 2.1) to bypass autodetection.
+ - This module is also supported for Windows targets.
+version_added: "0.6"
+deprecated:
+ why: it has too many conflicting behaviours depending on keyword combinations and it was unclear how it should behave in each case.
+ new actions were developed that were specific about each case and related behaviours.
+ alternative: include_tasks, import_tasks, import_playbook
+ removed_in: "2.16"
+ removed_from_collection: 'ansible.builtin'
+options:
+ free-form:
+ description:
+ - This module allows you to specify the name of the file directly without any other options.
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+ - Include has some unintuitive behaviours depending on whether it is running in a static or dynamic, in play or in playbook context;
+ in an effort to clarify behaviours we are moving to a new set of modules (M(ansible.builtin.include_tasks),
+ M(ansible.builtin.include_role), M(ansible.builtin.import_playbook), M(ansible.builtin.import_tasks))
+ that have well established and clear behaviours.
+ - This module no longer supports including plays. Use M(ansible.builtin.import_playbook) instead.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play
+ ansible.builtin.include: stuff.yaml
+
+ - ansible.builtin.debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play only if the condition is true
+ ansible.builtin.include: "{{ hostvar }}.yaml"
+ static: no
+ when: hostvar is defined
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py
new file mode 100644
index 0000000..b446df5
--- /dev/null
+++ b/lib/ansible/modules/add_host.py
@@ -0,0 +1,115 @@
+# -*- mode: python -*-
+
+# Copyright: (c) 2012, Seth Vidal (@skvidal)
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: add_host
+short_description: Add a host (and alternatively a group) to the ansible-playbook in-memory inventory
+description:
+- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
+- Takes variables so you can define the new hosts more fully.
+- This module is also supported for Windows targets.
+version_added: "0.9"
+options:
+ name:
+ description:
+ - The hostname/ip of the host to add to the inventory; it can include a colon and a port number.
+ type: str
+ required: true
+ aliases: [ host, hostname ]
+ groups:
+ description:
+ - The groups to add the hostname to.
+ type: list
+ elements: str
+ aliases: [ group, groupname ]
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ support: full
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ become:
+ support: none
+ bypass_host_loop:
+ support: full
+ bypass_task_loop:
+ support: none
+ check_mode:
+ details: While this makes no changes to target systems the 'in memory' inventory will still be altered
+ support: partial
+ connection:
+ support: none
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+- The alias C(host) of the parameter C(name) is only available on Ansible 2.4 and newer.
+- Since Ansible 2.4, the C(inventory_dir) variable is now set to C(None) instead of the 'global inventory source',
+ because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
+- Though this module does not change the remote host, we do provide 'changed' status as it can be useful for those trying to track inventory changes.
+- The hosts added will not bypass the C(--limit) from the command line, so both of those need to be in agreement to make them available as play targets.
+ They are still available from hostvars and for delegation as a normal part of the inventory.
+seealso:
+- module: ansible.builtin.group_by
+author:
+- Ansible Core Team
+- Seth Vidal (@skvidal)
+'''
+
+EXAMPLES = r'''
+- name: Add host to group 'just_created' with variable foo=42
+ ansible.builtin.add_host:
+ name: '{{ ip_from_ec2 }}'
+ groups: just_created
+ foo: 42
+
+- name: Add host to multiple groups
+ ansible.builtin.add_host:
+ hostname: '{{ new_ip }}'
+ groups:
+ - group1
+ - group2
+
+- name: Add a host with a non-standard port local to your machines
+ ansible.builtin.add_host:
+ name: '{{ new_ip }}:{{ new_port }}'
+
+- name: Add a host alias that we reach through a tunnel (Ansible 1.9 and older)
+ ansible.builtin.add_host:
+ hostname: '{{ new_ip }}'
+ ansible_ssh_host: '{{ inventory_hostname }}'
+ ansible_ssh_port: '{{ new_port }}'
+
+- name: Add a host alias that we reach through a tunnel (Ansible 2.0 and newer)
+ ansible.builtin.add_host:
+ hostname: '{{ new_ip }}'
+ ansible_host: '{{ inventory_hostname }}'
+ ansible_port: '{{ new_port }}'
+
+- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre Ansible 2.4 behaviour)
+ ansible.builtin.add_host:
+ hostname: charlie
+ inventory_dir: '{{ inventory_dir }}'
+
+- name: Add all hosts running this playbook to the done group
+ ansible.builtin.add_host:
+ name: '{{ item }}'
+ groups: done
+ loop: "{{ ansible_play_hosts }}"
+'''
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
new file mode 100644
index 0000000..1b7c5d2
--- /dev/null
+++ b/lib/ansible/modules/apt.py
@@ -0,0 +1,1487 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Flowroute LLC
+# Written by Matthew Williams <matthew@flowroute.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt
+short_description: Manages apt packages
+description:
+ - Manages I(apt) packages (such as for Debian/Ubuntu).
+version_added: "0.0.2"
+options:
+ name:
+ description:
+ - A list of package names, like C(foo), or package specifier with version, like C(foo=1.0) or C(foo>=1.0).
+ Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported.
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
+ are installed. C(fixed) attempts to correct a system with broken dependencies in place.
+ type: str
+ default: present
+ choices: [ absent, build-dep, latest, present, fixed ]
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
+ - Default is not to update the cache.
+ aliases: [ update-cache ]
+ type: bool
+ update_cache_retries:
+ description:
+ - Number of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
+ type: int
+ default: 5
+ version_added: '2.10'
+ update_cache_retry_max_delay:
+ description:
+ - Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
+ type: int
+ default: 12
+ version_added: '2.10'
+ cache_valid_time:
+ description:
+ - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
+ - As of Ansible 2.4, if explicitly set, this sets I(update_cache=yes).
+ type: int
+ default: 0
+ purge:
+ description:
+ - Will force purging of configuration files if the module state is set to I(absent).
+ type: bool
+ default: 'no'
+ default_release:
+ description:
+ - Corresponds to the C(-t) option for I(apt) and sets pin priorities.
+ aliases: [ default-release ]
+ type: str
+ install_recommends:
+ description:
+ - Corresponds to the C(--no-install-recommends) option for I(apt). C(true) installs recommended packages. C(false) does not install
+ recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
+ aliases: [ install-recommends ]
+ type: bool
+ force:
+ description:
+ - 'Corresponds to the C(--force-yes) to I(apt-get) and implies C(allow_unauthenticated: yes) and C(allow_downgrade: yes)'
+ - "This option will disable checking both the packages' signatures and the certificates of the
+ web servers they are downloaded from."
+ - 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
+ - '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
+ Please also see C(man apt-get) for more information.'
+ type: bool
+ default: 'no'
+ clean:
+ description:
+ - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
+ the lock file from /var/cache/apt/archives/ and /var/cache/apt/archives/partial/.
+ - Can be run as part of the package installation (clean runs before install) or as a separate step.
+ type: bool
+ default: 'no'
+ version_added: "2.13"
+ allow_unauthenticated:
+ description:
+ - Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
+ - 'C(allow_unauthenticated) is only supported with state: I(install)/I(present)'
+ aliases: [ allow-unauthenticated ]
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ allow_downgrade:
+ description:
+ - Corresponds to the C(--allow-downgrades) option for I(apt).
+ - This option enables the named package and version to replace an already installed higher version of that package.
+ - Note that setting I(allow_downgrade=true) can make this module behave in a non-idempotent way.
+ - (The task could end up with a set of packages that does not match the complete list of specified packages to install).
+ aliases: [ allow-downgrade, allow_downgrades, allow-downgrades ]
+ type: bool
+ default: 'no'
+ version_added: "2.12"
+ allow_change_held_packages:
+ description:
+ - Allows changing the version of a package which is on the apt hold list
+ type: bool
+ default: 'no'
+ version_added: '2.13'
+ upgrade:
+ description:
+ - If yes or safe, performs an aptitude safe-upgrade.
+ - If full, performs an aptitude full-upgrade.
+ - If dist, performs an apt-get dist-upgrade.
+ - 'Note: This does not upgrade a specific package, use state=latest for that.'
+ - 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.'
+ version_added: "1.1"
+ choices: [ dist, full, 'no', safe, 'yes' ]
+ default: 'no'
+ type: str
+ dpkg_options:
+ description:
+ - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
+ - Options should be supplied as comma separated list
+ default: force-confdef,force-confold
+ type: str
+ deb:
+ description:
+ - Path to a .deb package on the remote machine.
+ - If C(://) is in the path, Ansible will attempt to download the deb before installing. (Version added 2.1)
+ - Requires the C(xz-utils) package to extract the control file of the deb package to install.
+ type: path
+ required: false
+ version_added: "1.6"
+ autoremove:
+ description:
+ - If C(true), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option.
+ - Prior to version 2.4, autoclean was also an alias for autoremove; now it is its own separate command. See documentation for further information.
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ autoclean:
+ description:
+ - If C(true), cleans the local repository of retrieved package files that can no longer be downloaded.
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ policy_rc_d:
+ description:
+ - Force the exit code of /usr/sbin/policy-rc.d.
+ - For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
+ - If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation.
+ - If C(null), the /usr/sbin/policy-rc.d isn't created/changed.
+ type: int
+ default: null
+ version_added: "2.8"
+ only_upgrade:
+ description:
+ - Only upgrade a package if it is already installed.
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ fail_on_autoremove:
+ description:
+ - 'Corresponds to the C(--no-remove) option for C(apt).'
+ - 'If C(true), the task will fail rather than allow any packages to be removed.'
+ - 'C(fail_on_autoremove) is supported with all states except C(absent).'
+ type: bool
+ default: 'no'
+ version_added: "2.11"
+ force_apt_get:
+ description:
+ - Force usage of apt-get instead of aptitude
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ lock_timeout:
+ description:
+ - How many seconds this action will wait to acquire a lock on the apt db.
+ - Sometimes there is a transitory lock and this will retry at least until timeout is hit.
+ type: int
+ default: 60
+ version_added: "2.12"
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+ - aptitude (before 2.4)
+author: "Matthew Williams (@mgwilliams)"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: debian
+notes:
+ - Three of the upgrade modes (C(full), C(safe) and its alias C(true)) required C(aptitude) up to 2.3; since 2.4 C(apt-get) is used as a fall-back.
+ - In most cases, packages installed with apt will start newly installed services by default. Most distributions have mechanisms to avoid this.
+ For example when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that exits with
+ a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or remove its execute permission afterwards.
+ - The apt-get commandline supports implicit regex matches here but we do not because it can let typos through more easily
+ (if you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user;
+ since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding.)
+ - When used with a C(loop:) each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
+ - When C(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t).
+ - When an exact version is specified, an implicit priority of 1001 is used.
+'''
+
+EXAMPLES = '''
+- name: Install apache httpd (state=present is optional)
+ ansible.builtin.apt:
+ name: apache2
+ state: present
+
+- name: Update repositories cache and install "foo" package
+ ansible.builtin.apt:
+ name: foo
+ update_cache: yes
+
+- name: Remove "foo" package
+ ansible.builtin.apt:
+ name: foo
+ state: absent
+
+- name: Install the package "foo"
+ ansible.builtin.apt:
+ name: foo
+
+- name: Install a list of packages
+ ansible.builtin.apt:
+ pkg:
+ - foo
+ - foo-tools
+
+- name: Install the version '1.00' of package "foo"
+ ansible.builtin.apt:
+ name: foo=1.00
+
+- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backports
+ ansible.builtin.apt:
+ name: nginx
+ state: latest
+ default_release: squeeze-backports
+ update_cache: yes
+
+- name: Install the version '1.18.0' of package "nginx" and allow potential downgrades
+ ansible.builtin.apt:
+ name: nginx=1.18.0
+ state: present
+ allow_downgrade: yes
+
+- name: Install zfsutils-linux while ensuring conflicting packages (e.g. zfs-fuse) will not be removed.
+ ansible.builtin.apt:
+ name: zfsutils-linux
+ state: latest
+ fail_on_autoremove: yes
+
+- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
+ ansible.builtin.apt:
+ name: openjdk-6-jdk
+ state: latest
+ install_recommends: no
+
+- name: Update all packages to their latest version
+ ansible.builtin.apt:
+ name: "*"
+ state: latest
+
+- name: Upgrade the OS (apt-get dist-upgrade)
+ ansible.builtin.apt:
+ upgrade: dist
+
+- name: Run the equivalent of "apt-get update" as a separate step
+ ansible.builtin.apt:
+ update_cache: yes
+
+- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
+ ansible.builtin.apt:
+ update_cache: yes
+ cache_valid_time: 3600
+
+- name: Pass options to dpkg on run
+ ansible.builtin.apt:
+ upgrade: dist
+ update_cache: yes
+ dpkg_options: 'force-confold,force-confdef'
+
+- name: Install a .deb package
+ ansible.builtin.apt:
+ deb: /tmp/mypackage.deb
+
+- name: Install the build dependencies for package "foo"
+ ansible.builtin.apt:
+ pkg: foo
+ state: build-dep
+
+- name: Install a .deb package from the internet
+ ansible.builtin.apt:
+ deb: https://example.com/python-ppq_0.1-1_all.deb
+
+- name: Remove useless packages from the cache
+ ansible.builtin.apt:
+ autoclean: yes
+
+- name: Remove dependencies that are no longer required
+ ansible.builtin.apt:
+ autoremove: yes
+
+- name: Run the equivalent of "apt-get clean" as a separate step
+ ansible.builtin.apt:
+ clean: yes
+'''
+
+RETURN = '''
+cache_updated:
+ description: whether or not the cache was updated
+ returned: success, in some cases
+ type: bool
+ sample: True
+cache_update_time:
+ description: time of the last cache update (0 if unknown)
+ returned: success, in some cases
+ type: int
+ sample: 1425828348000
+stdout:
+ description: output from apt
+ returned: success, when needed
+ type: str
+ sample: |-
+ Reading package lists...
+ Building dependency tree...
+ Reading state information...
+ The following extra packages will be installed:
+ apache2-bin ...
+stderr:
+ description: error output from apt
+ returned: success, when needed
+ type: str
+ sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
+''' # NOQA
+
+# added to stave off future warnings about apt api
+import warnings
+warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
+
+import datetime
+import fnmatch
+import itertools
+import os
+import random
+import re
+import shutil
+import sys
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils.urls import fetch_file
+
+DPKG_OPTIONS = 'force-confdef,force-confold'
+APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
+APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
+APT_LISTS_PATH = "/var/lib/apt/lists"
+APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
+APT_MARK_INVALID_OP = 'Invalid operation'
+APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
+
+CLEAN_OP_CHANGED_STR = dict(
+ autoremove='The following packages will be REMOVED',
+ # "Del python3-q 2.4-1 [24 kB]"
+ autoclean='Del ',
+)
+
+
+HAS_PYTHON_APT = False
+try:
+ import apt
+ import apt.debfile
+ import apt_pkg
+ HAS_PYTHON_APT = True
+except ImportError:
+ apt = apt_pkg = None
+
+
+class PolicyRcD(object):
+ """
+ This class is a context manager for the /usr/sbin/policy-rc.d file.
+ It allows the user to prevent dpkg from starting the corresponding service when installing
+ a package.
+ https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
+ """
+
+ def __init__(self, module):
+ # we need the module for later use (eg. fail_json)
+ self.m = module
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ # if the /usr/sbin/policy-rc.d already exists
+ # we will back it up during package installation
+ # then restore it
+ if os.path.exists('/usr/sbin/policy-rc.d'):
+ self.backup_dir = tempfile.mkdtemp(prefix="ansible")
+ else:
+ self.backup_dir = None
+
+ def __enter__(self):
+ """
+ This method will be called when we enter the context, before we call `apt-get …`
+ """
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ # if the /usr/sbin/policy-rc.d already exists we back it up
+ if self.backup_dir:
+ try:
+ shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
+ except Exception:
+ self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
+
+ # we write /usr/sbin/policy-rc.d so it always exits with code policy_rc_d
+ try:
+ with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
+ policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
+
+ os.chmod('/usr/sbin/policy-rc.d', 0o0755)
+ except Exception:
+ self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
+
+ def __exit__(self, type, value, traceback):
+ """
+ This method will be called when we exit the context, after `apt-get …` has run
+ """
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ if self.backup_dir:
+ # if /usr/sbin/policy-rc.d already exists before the call to __enter__
+ # we restore it (from the backup done in __enter__)
+ try:
+ shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
+ '/usr/sbin/policy-rc.d')
+ os.rmdir(self.backup_dir)
+ except Exception:
+ self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
+ % os.path.join(self.backup_dir, 'policy-rc.d'))
+ else:
+ # if there wasn't a /usr/sbin/policy-rc.d file before the call to __enter__
+ # we just remove the file
+ try:
+ os.remove('/usr/sbin/policy-rc.d')
+ except Exception:
+ self.m.fail_json(msg="Fail to remove /usr/sbin/policy-rc.d (after package manipulation)")
+
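As used later in this file, the class is driven through a with-statement so the stand-in policy-rc.d only exists for the duration of the package operation. A minimal sketch of that pattern (m is the AnsibleModule instance; cmd is an apt-get command string built elsewhere in this module):

    # illustrative sketch of the pattern used throughout this module
    with PolicyRcD(m):
        rc, out, err = m.run_command(cmd)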
+
+def package_split(pkgspec):
+ parts = re.split(r'(>?=)', pkgspec, 1)
+ if len(parts) > 1:
+ return parts
+ return parts[0], None, None
+
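For reference, a hedged sketch of what package_split() yields for typical specs, derived from the regex above (note a matched operator returns the raw 3-element list from re.split, while a plain name returns a tuple):

    package_split('foo')       # -> ('foo', None, None)
    package_split('foo=1.0')   # -> ['foo', '=', '1.0']
    package_split('foo>=1.0')  # -> ['foo', '>=', '1.0']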
+
+def package_version_compare(version, other_version):
+ try:
+ return apt_pkg.version_compare(version, other_version)
+ except AttributeError:
+ return apt_pkg.VersionCompare(version, other_version)
+
+
+def package_best_match(pkgname, version_cmp, version, release, cache):
+ policy = apt_pkg.Policy(cache)
+
+ policy.read_pinfile(apt_pkg.config.find_file("Dir::Etc::preferences"))
+ policy.read_pindir(apt_pkg.config.find_file("Dir::Etc::preferencesparts"))
+
+ if release:
+ # 990 is the priority used in `apt-get -t`
+ policy.create_pin('Release', pkgname, release, 990)
+ if version_cmp == "=":
+ # Installing a specific version from command line overrides all pinning
+ # We don't mimic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
+ policy.create_pin('Version', pkgname, version, 1001)
+ pkg = cache[pkgname]
+ pkgver = policy.get_candidate_ver(pkg)
+ if not pkgver:
+ return None
+ if version_cmp == "=" and not fnmatch.fnmatch(pkgver.ver_str, version):
+ # Even though we put in a pin policy, it can be ignored if there is no
+ # possible candidate.
+ return None
+ return pkgver.ver_str
+
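In effect, the pinning mirrors apt's own rules: a release pin at priority 990 reproduces `apt-get -t <release>`, and an exact-version pin at 1001 outranks every built-in pin priority. A hedged call sketch (the package name and version pattern here are hypothetical):

    best = package_best_match('nginx', '=', '1.18.*', None, cache._cache)
    # -> a version string such as '1.18.0-0ubuntu1' when a candidate matches, or None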
+
+def package_status(m, pkgname, version_cmp, version, default_release, cache, state):
+ """
+ :return: A tuple of (installed, installed_version, version_installable, has_files). *installed* indicates whether
+ the package (regardless of version) is installed. *installed_version* indicates whether the installed package
+ matches the provided version criteria. *version_installable* provides the latest matching version that can be
+ installed. In the case of virtual packages where we can't determine an applicable match, True is returned.
+ *has_files* indicates whether the package has files on the filesystem (even if not installed, meaning a purge is
+ required).
+ """
+ try:
+ # get the package from the cache, as well as the
+ # low-level apt_pkg.Package object which contains
+ # state fields not directly accessible from the
+ # higher-level apt.package.Package object.
+ pkg = cache[pkgname]
+ ll_pkg = cache._cache[pkgname] # the low-level package object
+ except KeyError:
+ if state == 'install':
+ try:
+ provided_packages = cache.get_providing_packages(pkgname)
+ if provided_packages:
+ # When this is a virtual package satisfied by only
+ # one installed package, return the status of the target
+ # package to avoid requesting re-install
+ if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
+ package = provided_packages[0]
+ installed, installed_version, version_installable, has_files = \
+ package_status(m, package.name, version_cmp, version, default_release, cache, state='install')
+ if installed:
+ return installed, installed_version, version_installable, has_files
+
+ # Otherwise return nothing so apt will sort out
+ # what package to satisfy this with
+ return False, False, True, False
+
+ m.fail_json(msg="No package matching '%s' is available" % pkgname)
+ except AttributeError:
+ # python-apt version too old to detect virtual packages
+ # mark as not installed and let apt-get install deal with it
+ return False, False, True, False
+ else:
+ return False, False, None, False
+ try:
+ has_files = len(pkg.installed_files) > 0
+ except UnicodeDecodeError:
+ has_files = True
+ except AttributeError:
+ has_files = False # older python-apt cannot be used to determine non-purged
+
+ try:
+ package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
+ except AttributeError: # python-apt 0.7.X has very weak low-level object
+ try:
+ # might not be necessary as python-apt post-0.7.X should have current_state property
+ package_is_installed = pkg.is_installed
+ except AttributeError:
+ # assume older version of python-apt is installed
+ package_is_installed = pkg.isInstalled
+
+ version_best = package_best_match(pkgname, version_cmp, version, default_release, cache._cache)
+ version_is_installed = False
+ version_installable = None
+ if package_is_installed:
+ try:
+ installed_version = pkg.installed.version
+ except AttributeError:
+ installed_version = pkg.installedVersion
+
+ if version_cmp == "=":
+ # check if the version is matched as well
+ version_is_installed = fnmatch.fnmatch(installed_version, version)
+ if version_best and installed_version != version_best and fnmatch.fnmatch(version_best, version):
+ version_installable = version_best
+ elif version_cmp == ">=":
+ version_is_installed = apt_pkg.version_compare(installed_version, version) >= 0
+ if version_best and installed_version != version_best and apt_pkg.version_compare(version_best, version) >= 0:
+ version_installable = version_best
+ else:
+ version_is_installed = True
+ if version_best and installed_version != version_best:
+ version_installable = version_best
+ else:
+ version_installable = version_best
+
+ return package_is_installed, version_is_installed, version_installable, has_files
+
+
+def expand_dpkg_options(dpkg_options_compressed):
+ options_list = dpkg_options_compressed.split(',')
+ dpkg_options = ""
+ for dpkg_option in options_list:
+ dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
+ % (dpkg_options, dpkg_option)
+ return dpkg_options.strip()
+
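A small worked example of the expansion this helper performs, using the module default defined in DPKG_OPTIONS above:

    expand_dpkg_options('force-confdef,force-confold')
    # -> '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'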
+
+def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
+ # Note: apt-get does implicit regex matching when an exact package name
+ # match is not found. Something like this:
+ # matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
+ # (Should also deal with the ':' for multiarch like the fnmatch code below)
+ #
+ # We have decided not to do similar implicit regex matching but might take
+ # a PR to add some sort of explicit regex matching:
+ # https://github.com/ansible/ansible-modules-core/issues/1258
+ new_pkgspec = []
+ if pkgspec:
+ for pkgspec_pattern in pkgspec:
+
+ if not isinstance(pkgspec_pattern, string_types):
+ m.fail_json(msg="Invalid type for package name, expected string but got %s" % type(pkgspec_pattern))
+
+ pkgname_pattern, version_cmp, version = package_split(pkgspec_pattern)
+
+ # note that none of these chars is allowed in a (debian) pkgname
+ if frozenset('*?[]!').intersection(pkgname_pattern):
+ # handle multiarch pkgnames, the idea is that "apt*" should
+ # only select native packages. But "apt*:i386" should still work
+ if ":" not in pkgname_pattern:
+ # Filter the multiarch packages from the cache only once
+ try:
+ pkg_name_cache = _non_multiarch # pylint: disable=used-before-assignment
+ except NameError:
+ pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
+ else:
+ # Create a cache of pkg_names including multiarch only once
+ try:
+ pkg_name_cache = _all_pkg_names # pylint: disable=used-before-assignment
+ except NameError:
+ pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
+
+ matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
+
+ if not matches:
+ m.fail_json(msg="No package(s) matching '%s' available" % to_text(pkgname_pattern))
+ else:
+ new_pkgspec.extend(matches)
+ else:
+ # No wildcards in name
+ new_pkgspec.append(pkgspec_pattern)
+ return new_pkgspec
+
+
+def parse_diff(output):
+ diff = to_native(output).splitlines()
+ try:
+ # check for start marker from aptitude
+ diff_start = diff.index('Resolving dependencies...')
+ except ValueError:
+ try:
+ # check for start marker from apt-get
+ diff_start = diff.index('Reading state information...')
+ except ValueError:
+ # show everything
+ diff_start = -1
+ try:
+ # check for end marker line from both apt-get and aptitude
+ diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
+ except StopIteration:
+ diff_end = len(diff)
+ diff_start += 1
+ diff_end += 1
+ return {'prepared': '\n'.join(diff[diff_start:diff_end])}
+
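A hedged example of the slicing parse_diff() performs; the apt-get output below is illustrative, not captured from a real run:

    out = (
        'Reading package lists...\n'
        'Building dependency tree...\n'
        'Reading state information...\n'
        'The following NEW packages will be installed:\n'
        '  foo\n'
        '0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.\n'
    )
    parse_diff(out)
    # -> {'prepared': 'The following NEW packages will be installed:\n  foo\n'
    #                 '0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.'}

Everything up to and including the 'Reading state information...' marker is trimmed, and the final 'N upgraded' summary line is kept.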
+
+def mark_installed_manually(m, packages):
+ if not packages:
+ return
+
+ apt_mark_cmd_path = m.get_bin_path("apt-mark")
+
+ # https://github.com/ansible/ansible/issues/40531
+ if apt_mark_cmd_path is None:
+ m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
+ return
+
+ cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
+ rc, out, err = m.run_command(cmd)
+
+ if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
+ cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
+ rc, out, err = m.run_command(cmd)
+
+ if rc != 0:
+ m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+
+
+def install(m, pkgspec, cache, upgrade=False, default_release=None,
+ install_recommends=None, force=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
+ build_dep=False, fixed=False, autoremove=False, fail_on_autoremove=False, only_upgrade=False,
+ allow_unauthenticated=False, allow_downgrade=False, allow_change_held_packages=False):
+ pkg_list = []
+ packages = ""
+ pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
+ package_names = []
+ for package in pkgspec:
+ if build_dep:
+ # Let apt decide what to install
+ pkg_list.append("'%s'" % package)
+ continue
+
+ name, version_cmp, version = package_split(package)
+ package_names.append(name)
+ installed, installed_version, version_installable, has_files = package_status(m, name, version_cmp, version, default_release, cache, state='install')
+
+ if not installed and only_upgrade:
+ # only_upgrade upgrades packages that are already installed
+ # since this package is not installed, skip it
+ continue
+
+ if not installed_version and not version_installable:
+ status = False
+ data = dict(msg="no available installation candidate for %s" % package)
+ return (status, data)
+
+ if version_installable and ((not installed and not only_upgrade) or upgrade or not installed_version):
+ if version_installable is not True:
+ pkg_list.append("'%s=%s'" % (name, version_installable))
+ elif version:
+ pkg_list.append("'%s=%s'" % (name, version))
+ else:
+ pkg_list.append("'%s'" % name)
+ elif installed_version and version_installable and version_cmp == "=":
+ # This happens when the package is installed, a newer version is
+ # available, and the version is a wildcard that matches both
+ #
+ # This is legacy behavior, and isn't documented (in fact it does
+ # things the documentation says it shouldn't). It should not be relied
+ # upon.
+ pkg_list.append("'%s=%s'" % (name, version))
+ packages = ' '.join(pkg_list)
+
+ if packages:
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if fail_on_autoremove:
+ fail_on_autoremove = '--no-remove'
+ else:
+ fail_on_autoremove = ''
+
+ if only_upgrade:
+ only_upgrade = '--only-upgrade'
+ else:
+ only_upgrade = ''
+
+ if fixed:
+ fixed = '--fix-broken'
+ else:
+ fixed = ''
+
+ if build_dep:
+ cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages)
+ else:
+ cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
+ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages)
+
+ if default_release:
+ cmd += " -t '%s'" % (default_release,)
+
+ if install_recommends is False:
+ cmd += " -o APT::Install-Recommends=no"
+ elif install_recommends is True:
+ cmd += " -o APT::Install-Recommends=yes"
+ # install_recommends is None uses the OS default
+
+ if allow_unauthenticated:
+ cmd += " --allow-unauthenticated"
+
+ if allow_downgrade:
+ cmd += " --allow-downgrades"
+
+ if allow_change_held_packages:
+ cmd += " --allow-change-held-packages"
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ status = True
+
+ changed = True
+ if build_dep:
+ changed = APT_GET_ZERO not in out
+
+ data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
+ if rc:
+ status = False
+ data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+ else:
+ status = True
+ data = dict(changed=False)
+
+ if not build_dep and not m.check_mode:
+ mark_installed_manually(m, package_names)
+
+ return (status, data)
+
+
+def get_field_of_deb(m, deb_file, field="Version"):
+ cmd_dpkg = m.get_bin_path("dpkg", True)
+ cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
+ rc, stdout, stderr = m.run_command(cmd)
+ if rc != 0:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+ return to_native(stdout).strip('\n')
+
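The command this helper builds boils down to `dpkg --field <deb_file> <field>`. A hedged usage sketch (the .deb path is hypothetical):

    pkg_name = get_field_of_deb(m, '/tmp/mypackage.deb', 'Package')
    pkg_version = get_field_of_deb(m, '/tmp/mypackage.deb')  # field defaults to 'Version'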
+
+def install_deb(
+ m, debs, cache, force, fail_on_autoremove, install_recommends,
+ allow_unauthenticated,
+ allow_downgrade,
+ allow_change_held_packages,
+ dpkg_options,
+):
+ changed = False
+ deps_to_install = []
+ pkgs_to_install = []
+ for deb_file in debs.split(','):
+ try:
+ pkg = apt.debfile.DebPackage(deb_file, cache=apt.Cache())
+ pkg_name = get_field_of_deb(m, deb_file, "Package")
+ pkg_version = get_field_of_deb(m, deb_file, "Version")
+ if hasattr(apt_pkg, 'get_architectures') and len(apt_pkg.get_architectures()) > 1:
+ pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
+ pkg_key = "%s:%s" % (pkg_name, pkg_arch)
+ else:
+ pkg_key = pkg_name
+ try:
+ installed_pkg = apt.Cache()[pkg_key]
+ installed_version = installed_pkg.installed.version
+ if package_version_compare(pkg_version, installed_version) == 0:
+ # Does not need to down-/upgrade, move on to next package
+ continue
+ except Exception:
+ # Must not be installed, continue with installation
+ pass
+ # Check if package is installable
+ if not pkg.check():
+ if force or ("later version" in pkg._failure_string and allow_downgrade):
+ pass
+ else:
+ m.fail_json(msg=pkg._failure_string)
+
+ # add any missing deps to the list of deps we need
+ # to install so they're all done in one shot
+ deps_to_install.extend(pkg.missing_deps)
+
+ except Exception as e:
+ m.fail_json(msg="Unable to install package: %s" % to_native(e))
+
+ # and add this deb to the list of packages to install
+ pkgs_to_install.append(deb_file)
+
+ # install the deps through apt
+ retvals = {}
+ if deps_to_install:
+ (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
+ install_recommends=install_recommends,
+ fail_on_autoremove=fail_on_autoremove,
+ allow_unauthenticated=allow_unauthenticated,
+ allow_downgrade=allow_downgrade,
+ allow_change_held_packages=allow_change_held_packages,
+ dpkg_options=expand_dpkg_options(dpkg_options))
+ if not success:
+ m.fail_json(**retvals)
+ changed = retvals.get('changed', False)
+
+ if pkgs_to_install:
+ options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
+ if m.check_mode:
+ options += " --simulate"
+ if force:
+ options += " --force-all"
+
+ cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if "stdout" in retvals:
+ stdout = retvals["stdout"] + out
+ else:
+ stdout = out
+ if "diff" in retvals:
+ diff = retvals["diff"]
+ if 'prepared' in diff:
+ diff['prepared'] += '\n\n' + out
+ else:
+ diff = parse_diff(out)
+ if "stderr" in retvals:
+ stderr = retvals["stderr"] + err
+ else:
+ stderr = err
+
+ if rc == 0:
+ m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
+ else:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
+
+
+def remove(m, pkgspec, cache, purge=False, force=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
+ pkg_list = []
+ pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
+ for package in pkgspec:
+ name, version_cmp, version = package_split(package)
+ installed, installed_version, upgradable, has_files = package_status(m, name, version_cmp, version, None, cache, state='remove')
+ if installed_version or (has_files and purge):
+ pkg_list.append("'%s'" % package)
+ packages = ' '.join(pkg_list)
+
+ if not packages:
+ m.exit_json(changed=False)
+ else:
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if purge:
+ purge = '--purge'
+ else:
+ purge = ''
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc)
+ m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
+
+
+def cleanup(m, purge=False, force=False, operation=None,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
+
+ if operation not in frozenset(['autoremove', 'autoclean']):
+ raise AssertionError('Expected "autoremove" or "autoclean" cleanup operation, got %s' % operation)
+
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if purge:
+ purge = '--purge'
+ else:
+ purge = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc)
+
+ changed = CLEAN_OP_CHANGED_STR[operation] in out
+
+ m.exit_json(changed=changed, stdout=out, stderr=err, diff=diff)
+
+
+def aptclean(m):
+ clean_rc, clean_out, clean_err = m.run_command(['apt-get', 'clean'])
+ if m._diff:
+ clean_diff = parse_diff(clean_out)
+ else:
+ clean_diff = {}
+ if clean_rc:
+ m.fail_json(msg="apt-get clean failed", stdout=clean_out, rc=clean_rc)
+ if clean_err:
+ m.fail_json(msg="apt-get clean failed: %s" % clean_err, stdout=clean_out, rc=clean_rc)
+ return clean_out, clean_err
+
+
+def upgrade(m, mode="yes", force=False, default_release=None,
+ use_apt_get=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, fail_on_autoremove=False,
+ allow_unauthenticated=False,
+ allow_downgrade=False,
+ ):
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ apt_cmd = None
+ prompt_regex = None
+ if mode == "dist" or (mode == "full" and use_apt_get):
+ # apt-get dist-upgrade
+ apt_cmd = APT_GET_CMD
+ upgrade_command = "dist-upgrade %s" % (autoremove)
+ elif mode == "full" and not use_apt_get:
+ # aptitude full-upgrade
+ apt_cmd = APTITUDE_CMD
+ upgrade_command = "full-upgrade"
+ else:
+ if use_apt_get:
+ apt_cmd = APT_GET_CMD
+ upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
+ else:
+ # aptitude safe-upgrade # mode=yes # default
+ apt_cmd = APTITUDE_CMD
+ upgrade_command = "safe-upgrade"
+ prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
+
+ if force:
+ if apt_cmd == APT_GET_CMD:
+ force_yes = '--force-yes'
+ else:
+ force_yes = '--assume-yes --allow-untrusted'
+ else:
+ force_yes = ''
+
+ if fail_on_autoremove:
+ fail_on_autoremove = '--no-remove'
+ else:
+ fail_on_autoremove = ''
+
+ allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
+
+ allow_downgrade = '--allow-downgrades' if allow_downgrade else ''
+
+ if apt_cmd is None:
+ if use_apt_get:
+ apt_cmd = APT_GET_CMD
+ else:
+ m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
+ "to have APTITUDE in path or use 'force_apt_get=True'")
+ apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
+
+ cmd = '%s -y %s %s %s %s %s %s %s' % (
+ apt_cmd_path,
+ dpkg_options,
+ force_yes,
+ fail_on_autoremove,
+ allow_unauthenticated,
+ allow_downgrade,
+ check_arg,
+ upgrade_command,
+ )
+
+ if default_release:
+ cmd += " -t '%s'" % (default_release,)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
+ if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
+ m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
+ m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
+
+
+def get_cache_mtime():
+ """Return mtime of a valid apt cache file.
+ Stat the apt cache file; if no cache file is found, return 0
+ :returns: ``int``
+ """
+ cache_time = 0
+ if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
+ cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
+ elif os.path.exists(APT_LISTS_PATH):
+ cache_time = os.stat(APT_LISTS_PATH).st_mtime
+ return cache_time
+
+
+def get_updated_cache_time():
+ """Return the mtime time stamp and the updated cache time.
+ Always retrieve the mtime of the apt cache or set the `cache_mtime`
+ variable to 0
+ :returns: ``tuple``
+ """
+ cache_mtime = get_cache_mtime()
+ mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
+ updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
+ return mtimestamp, updated_cache_time
+
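A short sketch of what these two helpers return together, assuming an update-success stamp exists (the values are illustrative):

    mtimestamp, updated_cache_time = get_updated_cache_time()
    # mtimestamp         -> a datetime.datetime built from the stamp file's mtime
    # updated_cache_time -> the same instant as an integer epoch, e.g. 1425830748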
+
+# https://github.com/ansible/ansible-modules-core/issues/2951
+def get_cache(module):
+ '''Attempt to get the cache object and update until it works'''
+ cache = None
+ try:
+ cache = apt.Cache()
+ except SystemError as e:
+ if '/var/lib/apt/lists/' in to_native(e).lower():
+ # update cache until files are fixed or retries exceeded
+ retries = 0
+ while retries < 2:
+ (rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
+ retries += 1
+ if rc == 0:
+ break
+ if rc != 0:
+ module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (to_native(e), so + se), rc=rc)
+ # try again
+ cache = apt.Cache()
+ else:
+ module.fail_json(msg=to_native(e))
+ return cache
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
+ update_cache=dict(type='bool', aliases=['update-cache']),
+ update_cache_retries=dict(type='int', default=5),
+ update_cache_retry_max_delay=dict(type='int', default=12),
+ cache_valid_time=dict(type='int', default=0),
+ purge=dict(type='bool', default=False),
+ package=dict(type='list', elements='str', aliases=['pkg', 'name']),
+ deb=dict(type='path'),
+ default_release=dict(type='str', aliases=['default-release']),
+ install_recommends=dict(type='bool', aliases=['install-recommends']),
+ force=dict(type='bool', default=False),
+ upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes'], default='no'),
+ dpkg_options=dict(type='str', default=DPKG_OPTIONS),
+ autoremove=dict(type='bool', default=False),
+ autoclean=dict(type='bool', default=False),
+ fail_on_autoremove=dict(type='bool', default=False),
+ policy_rc_d=dict(type='int', default=None),
+ only_upgrade=dict(type='bool', default=False),
+ force_apt_get=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
+ allow_downgrade=dict(type='bool', default=False, aliases=['allow-downgrade', 'allow_downgrades', 'allow-downgrades']),
+ allow_change_held_packages=dict(type='bool', default=False),
+ lock_timeout=dict(type='int', default=60),
+ ),
+ mutually_exclusive=[['deb', 'package', 'upgrade']],
+ required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ # We screenscrape apt-get and aptitude output for information so we need
+ # to make sure we use the best parsable locale when running commands;
+ # we also set apt-specific vars for the desired behaviour
+ locale = get_best_parsable_locale(module)
+ # APT related constants
+ APT_ENV_VARS = dict(
+ DEBIAN_FRONTEND='noninteractive',
+ DEBIAN_PRIORITY='critical',
+ LANG=locale,
+ LC_ALL=locale,
+ LC_MESSAGES=locale,
+ LC_CTYPE=locale,
+ )
+ module.run_command_environ_update = APT_ENV_VARS
+
+ if not HAS_PYTHON_APT:
+ # This interpreter can't see the apt Python library- we'll do the following to try and fix that:
+ # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
+ # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+ # we limit to the current interpreter version to try and avoid installing a whole other Python just
+ # for apt support
+ # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
+ # the current interpreter again, but we'll let it respawn anyway for simplicity)
+ # 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
+ # made any more complex than it already is to try and cover more, eg, custom interpreters taking over
+ # system locations)
+
+ apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
+
+ if has_respawned():
+ # this shouldn't be possible; short-circuit early if it happens...
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # don't make changes if we're in check_mode
+ if module.check_mode:
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % apt_pkg_name)
+
+ # We skip the cache update when auto-installing the dependency if the
+ # user explicitly declared it with update_cache=no.
+ if module.params.get('update_cache') is False:
+ module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
+ else:
+ module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
+ module.run_command(['apt-get', 'update'], check_rc=True)
+
+ # try to install the apt Python binding
+ module.run_command(['apt-get', 'install', '--no-install-recommends', apt_pkg_name, '-y', '-q'], check_rc=True)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+ else:
+ # we've done all we can do; just tell the user it's busted and get out
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ global APTITUDE_CMD
+ APTITUDE_CMD = module.get_bin_path("aptitude", False)
+ global APT_GET_CMD
+ APT_GET_CMD = module.get_bin_path("apt-get")
+
+ p = module.params
+
+ if p['clean'] is True:
+ aptclean_stdout, aptclean_stderr = aptclean(module)
+ # If there is nothing else to do exit. This will set state as
+ # changed based on whether the cache was updated.
+ if not p['package'] and not p['upgrade'] and not p['deb']:
+ module.exit_json(
+ changed=True,
+ msg=aptclean_stdout,
+ stdout=aptclean_stdout,
+ stderr=aptclean_stderr
+ )
+
+ if p['upgrade'] == 'no':
+ p['upgrade'] = None
+
+ use_apt_get = p['force_apt_get']
+
+ if not use_apt_get and not APTITUDE_CMD:
+ use_apt_get = True
+
+ updated_cache = False
+ updated_cache_time = 0
+ install_recommends = p['install_recommends']
+ allow_unauthenticated = p['allow_unauthenticated']
+ allow_downgrade = p['allow_downgrade']
+ allow_change_held_packages = p['allow_change_held_packages']
+ dpkg_options = expand_dpkg_options(p['dpkg_options'])
+ autoremove = p['autoremove']
+ fail_on_autoremove = p['fail_on_autoremove']
+ autoclean = p['autoclean']
+
+ # max times we'll retry
+ deadline = time.time() + p['lock_timeout']
+
+ # keep running on lock issues unless timeout or resolution is hit.
+ while True:
+
+ # Get the cache object; this has retry logic built in (see get_cache)
+ cache = get_cache(module)
+
+ try:
+ if p['default_release']:
+ try:
+ apt_pkg.config['APT::Default-Release'] = p['default_release']
+ except AttributeError:
+ apt_pkg.Config['APT::Default-Release'] = p['default_release']
+ # reopen cache w/ modified config
+ cache.open(progress=None)
+
+ mtimestamp, updated_cache_time = get_updated_cache_time()
+ # Cache valid time defaults to 0, which will update the cache if
+ # needed when `update_cache` is set to true
+ updated_cache = False
+ if p['update_cache'] or p['cache_valid_time']:
+ now = datetime.datetime.now()
+ tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
+ if not mtimestamp + tdelta >= now:
+ # Retry to update the cache with exponential backoff
+ err = ''
+ update_cache_retries = module.params.get('update_cache_retries')
+ update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
+ randomize = random.randint(0, 1000) / 1000.0
+
+ for retry in range(update_cache_retries):
+ try:
+ if not module.check_mode:
+ cache.update()
+ break
+ except apt.cache.FetchFailedException as e:
+ err = to_native(e)
+
+ # Use exponential backoff plus a little bit of randomness
+ delay = 2 ** retry + randomize
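+                            # Illustration (values assume the defaults update_cache_retries=5 and
+                            # update_cache_retry_max_delay=12): successive delays are roughly
+                            # 1, 2, 4, 8 and 12 (capped below) seconds, each plus up to 1s of jitter.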
+ if delay > update_cache_retry_max_delay:
+ delay = update_cache_retry_max_delay + randomize
+ time.sleep(delay)
+ else:
+ module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+
+ cache.open(progress=None)
+ mtimestamp, post_cache_update_time = get_updated_cache_time()
+ if module.check_mode or updated_cache_time != post_cache_update_time:
+ updated_cache = True
+ updated_cache_time = post_cache_update_time
+
+            # If there is nothing else to do, exit. This reports the state as
+            # changed based on whether the cache was updated.
+ if not p['package'] and not p['upgrade'] and not p['deb']:
+ module.exit_json(
+ changed=updated_cache,
+ cache_updated=updated_cache,
+ cache_update_time=updated_cache_time
+ )
+
+ force_yes = p['force']
+
+ if p['upgrade']:
+ upgrade(
+ module,
+ p['upgrade'],
+ force_yes,
+ p['default_release'],
+ use_apt_get,
+ dpkg_options,
+ autoremove,
+ fail_on_autoremove,
+ allow_unauthenticated,
+ allow_downgrade
+ )
+
+ if p['deb']:
+ if p['state'] != 'present':
+ module.fail_json(msg="deb only supports state=present")
+ if '://' in p['deb']:
+ p['deb'] = fetch_file(module, p['deb'])
+ install_deb(module, p['deb'], cache,
+ install_recommends=install_recommends,
+ allow_unauthenticated=allow_unauthenticated,
+ allow_change_held_packages=allow_change_held_packages,
+ allow_downgrade=allow_downgrade,
+ force=force_yes, fail_on_autoremove=fail_on_autoremove, dpkg_options=p['dpkg_options'])
+
+ unfiltered_packages = p['package'] or ()
+ packages = [package.strip() for package in unfiltered_packages if package != '*']
+ all_installed = '*' in unfiltered_packages
+ latest = p['state'] == 'latest'
+
+ if latest and all_installed:
+ if packages:
+ module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
+ upgrade(
+ module,
+ 'yes',
+ force_yes,
+ p['default_release'],
+ use_apt_get,
+ dpkg_options,
+ autoremove,
+ fail_on_autoremove,
+ allow_unauthenticated,
+ allow_downgrade
+ )
+
+ if packages:
+ for package in packages:
+ if package.count('=') > 1:
+ module.fail_json(msg="invalid package spec: %s" % package)
+
+ if not packages:
+ if autoclean:
+ cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
+ if autoremove:
+ cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
+
+ if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
+ state_upgrade = False
+ state_builddep = False
+ state_fixed = False
+ if p['state'] == 'latest':
+ state_upgrade = True
+ if p['state'] == 'build-dep':
+ state_builddep = True
+ if p['state'] == 'fixed':
+ state_fixed = True
+
+ success, retvals = install(
+ module,
+ packages,
+ cache,
+ upgrade=state_upgrade,
+ default_release=p['default_release'],
+ install_recommends=install_recommends,
+ force=force_yes,
+ dpkg_options=dpkg_options,
+ build_dep=state_builddep,
+ fixed=state_fixed,
+ autoremove=autoremove,
+ fail_on_autoremove=fail_on_autoremove,
+ only_upgrade=p['only_upgrade'],
+ allow_unauthenticated=allow_unauthenticated,
+ allow_downgrade=allow_downgrade,
+ allow_change_held_packages=allow_change_held_packages,
+ )
+
+ # Store if the cache has been updated
+ retvals['cache_updated'] = updated_cache
+                # Store the time of the last cache update
+ retvals['cache_update_time'] = updated_cache_time
+
+ if success:
+ module.exit_json(**retvals)
+ else:
+ module.fail_json(**retvals)
+ elif p['state'] == 'absent':
+ remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
+
+ except apt.cache.LockFailedException as lockFailedException:
+ if time.time() < deadline:
+ continue
+ module.fail_json(msg="Failed to lock apt for exclusive operation: %s" % lockFailedException)
+ except apt.cache.FetchFailedException as fetchFailedException:
+ module.fail_json(msg="Could not fetch updated apt files: %s" % fetchFailedException)
+
+    # got here without an exception or an exit; this should be unreachable
+ module.fail_json(msg='Unexpected code path taken, we really should have exited before, this is a bug')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
new file mode 100644
index 0000000..67caf6d
--- /dev/null
+++ b/lib/ansible/modules/apt_key.py
@@ -0,0 +1,530 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt_key
+author:
+- Jayson Vantuyl (@jvantuyl)
+version_added: "1.0"
+short_description: Add or remove an apt key
+description:
+ - Add or remove an I(apt) key, optionally downloading it.
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: debian
+notes:
+  - The apt-key command has been deprecated and its documentation suggests managing keyring files in trusted.gpg.d instead. See the Debian wiki for details.
+ This module is kept for backwards compatibility for systems that still use apt-key as the main way to manage apt repository keys.
+ - As a sanity check, downloaded key id must match the one specified.
+ - "Use full fingerprint (40 characters) key ids to avoid key collisions.
+ To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
+ - If you specify both the key id and the URL with C(state=present), the task can verify or add the key as needed.
+ - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's update_cache option).
+requirements:
+ - gpg
+options:
+ id:
+ description:
+ - The identifier of the key.
+ - Including this allows check mode to correctly report the changed state.
+ - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
+ - This parameter is required when C(state) is set to C(absent).
+ type: str
+ data:
+ description:
+ - The keyfile contents to add to the keyring.
+ type: str
+ file:
+ description:
+ - The path to a keyfile on the remote server to add to the keyring.
+ type: path
+ keyring:
+ description:
+ - The full path to specific keyring file in C(/etc/apt/trusted.gpg.d/).
+ type: path
+ version_added: "1.3"
+ url:
+ description:
+ - The URL to retrieve key from.
+ type: str
+ keyserver:
+ description:
+ - The keyserver to retrieve key from.
+ type: str
+ version_added: "1.6"
+ state:
+ description:
+ - Ensures that the key is present (added) or absent (revoked).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: One way to avoid apt_key once it is removed from your distro
+ block:
+    - name: somerepo | no apt key
+ ansible.builtin.get_url:
+ url: https://download.example.com/linux/ubuntu/gpg
+ dest: /etc/apt/trusted.gpg.d/somerepo.asc
+
+ - name: somerepo | apt source
+ ansible.builtin.apt_repository:
+        repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/somerepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
+ state: present
+
+- name: Add an apt key by id from a keyserver
+ ansible.builtin.apt_key:
+ keyserver: keyserver.ubuntu.com
+ id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
+
+- name: Add an Apt signing key, uses whichever key is at the URL
+ ansible.builtin.apt_key:
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ state: present
+
+- name: Add an Apt signing key, will not download if present
+ ansible.builtin.apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ state: present
+
+- name: Remove an Apt specific signing key, leading 0x is valid
+ ansible.builtin.apt_key:
+ id: 0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ state: absent
+
+# Use armored file since utf-8 string is expected. Must be of "PGP PUBLIC KEY BLOCK" type.
+- name: Add a key from a file on the Ansible server
+ ansible.builtin.apt_key:
+ data: "{{ lookup('ansible.builtin.file', 'apt.asc') }}"
+ state: present
+
+- name: Add an Apt signing key to a specific keyring file
+ ansible.builtin.apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ keyring: /etc/apt/trusted.gpg.d/debian.gpg
+
+- name: Add Apt signing key on remote server to keyring
+ ansible.builtin.apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ file: /tmp/apt.gpg
+ state: present
+'''
+
+RETURN = '''
+after:
+ description: List of apt key ids or fingerprints after any modification
+ returned: on change
+ type: list
+ sample: ["D8576A8BA88D21E9", "3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
+before:
+  description: List of apt key ids or fingerprints before any modifications
+ returned: always
+ type: list
+ sample: ["3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
+fp:
+ description: Fingerprint of the key to import
+ returned: always
+ type: str
+ sample: "D8576A8BA88D21E9"
+id:
+ description: key id from source
+ returned: always
+ type: str
+ sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
+key_id:
+  description: calculated key id; it should be the same as 'id', but can differ
+ returned: always
+ type: str
+ sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
+short_id:
+ description: calculated short key id
+ returned: always
+ type: str
+ sample: "A88D21E9"
+'''
+
+import os
+
+# FIXME: standardize into module_common
+from traceback import format_exc
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.urls import fetch_url
+
+
+apt_key_bin = None
+gpg_bin = None
+locale = None
+
+
+def lang_env(module):
+
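+    # The computed environment is cached on the function object itself, so the
+    # locale probe below runs only once per process.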
+ if not hasattr(lang_env, 'result'):
+ locale = get_best_parsable_locale(module)
+ lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+
+ return lang_env.result
+
+
+def find_needed_binaries(module):
+ global apt_key_bin
+ global gpg_bin
+ apt_key_bin = module.get_bin_path('apt-key', required=True)
+ gpg_bin = module.get_bin_path('gpg', required=True)
+
+
+def add_http_proxy(cmd):
+
+ for envvar in ('HTTPS_PROXY', 'https_proxy', 'HTTP_PROXY', 'http_proxy'):
+ proxy = os.environ.get(envvar)
+ if proxy:
+ break
+
+ if proxy:
+ cmd += ' --keyserver-options http-proxy=%s' % proxy
+
+ return cmd
+
+
+def parse_key_id(key_id):
+ """validate the key_id and break it into segments
+
+ :arg key_id: The key_id as supplied by the user. A valid key_id will be
+ 8, 16, or more hexadecimal chars with an optional leading ``0x``.
+ :returns: The portion of key_id suitable for apt-key del, the portion
+ suitable for comparisons with --list-public-keys, and the portion that
+ can be used with --recv-key. If key_id is long enough, these will be
+ the last 8 characters of key_id, the last 16 characters, and all of
+ key_id. If key_id is not long enough, some of the values will be the
+ same.
+
+ * apt-key del <= 1.10 has a bug with key_id != 8 chars
+ * apt-key adv --list-public-keys prints 16 chars
+ * apt-key adv --recv-key can take more chars
+
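+    Example (illustrative values, not part of the module API): a full
+    40-character fingerprint yields the 8-char deletion id, the 16-char
+    comparison id, and the full id::
+
+        parse_key_id('0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA')
+        # -> ('473041FA', 'AED4B06F473041FA', '9FED2BCBDCD29CDF762678CBAED4B06F473041FA')
+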
+ """
+ # Make sure the key_id is valid hexadecimal
+ int(to_native(key_id), 16)
+
+ key_id = key_id.upper()
+ if key_id.startswith('0X'):
+ key_id = key_id[2:]
+
+ key_id_len = len(key_id)
+ if (key_id_len != 8 and key_id_len != 16) and key_id_len <= 16:
+ raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
+
+ short_key_id = key_id[-8:]
+
+ fingerprint = key_id
+ if key_id_len > 16:
+ fingerprint = key_id[-16:]
+
+ return short_key_id, fingerprint, key_id
+
+
+def parse_output_for_keys(output, short_format=False):
+
+ found = []
+ lines = to_native(output).split('\n')
+ for line in lines:
+ if (line.startswith("pub") or line.startswith("sub")) and "expired" not in line:
+ try:
+ # apt key format
+ tokens = line.split()
+ code = tokens[1]
+ (len_type, real_code) = code.split("/")
+ except (IndexError, ValueError):
+ # gpg format
+ try:
+ tokens = line.split(':')
+ real_code = tokens[4]
+ except (IndexError, ValueError):
+ # invalid line, skip
+ continue
+ found.append(real_code)
+
+ if found and short_format:
+ found = shorten_key_ids(found)
+
+ return found
+
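+# Illustration (the sample lines below are assumptions about the two formats,
+# not guaranteed output): parse_output_for_keys() extracts "AED4B06F473041FA"
+# from either of these:
+#   apt-key format:    pub   rsa4096/AED4B06F473041FA 2012-05-08 [SC]
+#   gpg --with-colons: pub:-:4096:1:AED4B06F473041FA:1336417292::-::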
+
+def all_keys(module, keyring, short_format):
+ if keyring is not None:
+ cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
+ else:
+ cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Unable to list public keys", cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ return parse_output_for_keys(out, short_format)
+
+
+def shorten_key_ids(key_id_list):
+ """
+ Takes a list of key ids, and converts them to the 'short' format,
+ by reducing them to their last 8 characters.
+ """
+ short = []
+ for key in key_id_list:
+ short.append(key[-8:])
+ return short
+
+
+def download_key(module, url):
+
+ try:
+ # note: validate_certs and other args are pulled from module directly
+ rsp, info = fetch_url(module, url, use_proxy=True)
+ if info['status'] != 200:
+ module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
+
+ return rsp.read()
+ except Exception:
+ module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+
+
+def get_key_id_from_file(module, filename, data=None):
+
+ native_data = to_native(data)
+ is_armored = native_data.find("-----BEGIN PGP PUBLIC KEY BLOCK-----") >= 0
+
+ key = None
+
+ cmd = [gpg_bin, '--with-colons', filename]
+
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env(module), data=(native_data if is_armored else data), binary_data=not is_armored)
+ if rc != 0:
+ module.fail_json(msg="Unable to extract key from '%s'" % ('inline data' if data is not None else filename), stdout=out, stderr=err)
+
+ keys = parse_output_for_keys(out)
+    # assume we only want the first key
+ if keys:
+ key = keys[0]
+
+ return key
+
+
+def get_key_id_from_data(module, data):
+ return get_key_id_from_file(module, '-', data)
+
+
+def import_key(module, keyring, keyserver, key_id):
+
+ if keyring:
+ cmd = "%s --keyring %s adv --no-tty --keyserver %s" % (apt_key_bin, keyring, keyserver)
+ else:
+ cmd = "%s adv --no-tty --keyserver %s" % (apt_key_bin, keyserver)
+
+ # check for proxy
+ cmd = add_http_proxy(cmd)
+
+ # add recv argument as last one
+ cmd = "%s --recv %s" % (cmd, key_id)
+
+ for retry in range(5):
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env(module))
+ if rc == 0:
+ break
+ else:
+ # Out of retries
+ if rc == 2 and 'not found on keyserver' in out:
+ msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg, forced_environment=lang_env(module))
+ else:
+ msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg, forced_environment=lang_env(module), rc=rc, stdout=out, stderr=err)
+ return True
+
+
+def add_key(module, keyfile, keyring, data=None):
+ if data is not None:
+ if keyring:
+ cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
+ else:
+ cmd = "%s add -" % apt_key_bin
+ (rc, out, err) = module.run_command(cmd, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(
+ msg="Unable to add a key from binary data",
+ cmd=cmd,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ )
+ else:
+ if keyring:
+ cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
+ else:
+ cmd = "%s add %s" % (apt_key_bin, keyfile)
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(
+ msg="Unable to add a key from file %s" % (keyfile),
+ cmd=cmd,
+ rc=rc,
+ keyfile=keyfile,
+ stdout=out,
+ stderr=err,
+ )
+ return True
+
+
+def remove_key(module, key_id, keyring):
+ if keyring:
+ cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
+ else:
+ cmd = '%s del %s' % (apt_key_bin, key_id)
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(
+ msg="Unable to remove a key with id %s" % (key_id),
+ cmd=cmd,
+ rc=rc,
+ key_id=key_id,
+ stdout=out,
+ stderr=err,
+ )
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str'),
+ url=dict(type='str'),
+ data=dict(type='str'),
+ file=dict(type='path'),
+ keyring=dict(type='path'),
+ validate_certs=dict(type='bool', default=True),
+ keyserver=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('data', 'file', 'keyserver', 'url'),),
+ )
+
+ # parameters
+ key_id = module.params['id']
+ url = module.params['url']
+ data = module.params['data']
+ filename = module.params['file']
+ keyring = module.params['keyring']
+ state = module.params['state']
+ keyserver = module.params['keyserver']
+
+ # internal vars
+ short_format = False
+ short_key_id = None
+ fingerprint = None
+ error_no_error = "apt-key did not return an error, but %s (check that the id is correct and *not* a subkey)"
+
+ # ensure we have requirements met
+ find_needed_binaries(module)
+
+ # initialize result dict
+ r = {'changed': False}
+
+ if not key_id:
+
+ if keyserver:
+ module.fail_json(msg="Missing key_id, required with keyserver.")
+
+ if url:
+ data = download_key(module, url)
+
+ if filename:
+ key_id = get_key_id_from_file(module, filename)
+ elif data:
+ key_id = get_key_id_from_data(module, data)
+
+ r['id'] = key_id
+ try:
+ short_key_id, fingerprint, key_id = parse_key_id(key_id)
+ r['short_id'] = short_key_id
+ r['fp'] = fingerprint
+ r['key_id'] = key_id
+ except ValueError:
+ module.fail_json(msg='Invalid key_id', **r)
+
+ if not fingerprint:
+        # an invalid key should fail well before this point, but just in case ...
+ module.fail_json(msg="Unable to continue as we could not extract a valid fingerprint to compare against existing keys.", **r)
+
+ if len(key_id) == 8:
+ short_format = True
+
+ # get existing keys to verify if we need to change
+ r['before'] = keys = all_keys(module, keyring, short_format)
+ keys2 = []
+
+ if state == 'present':
+ if (short_format and short_key_id not in keys) or (not short_format and fingerprint not in keys):
+ r['changed'] = True
+ if not module.check_mode:
+ if filename:
+ add_key(module, filename, keyring)
+ elif keyserver:
+ import_key(module, keyring, keyserver, key_id)
+ elif data:
+ # this also takes care of url if key_id was not provided
+ add_key(module, "-", keyring, data)
+ elif url:
+ # we hit this branch only if key_id is supplied with url
+ data = download_key(module, url)
+ add_key(module, "-", keyring, data)
+ else:
+                    module.fail_json(msg="No key to add ... how did I get here?!?!", **r)
+
+ # verify it got added
+ r['after'] = keys2 = all_keys(module, keyring, short_format)
+ if (short_format and short_key_id not in keys2) or (not short_format and fingerprint not in keys2):
+ module.fail_json(msg=error_no_error % 'failed to add the key', **r)
+
+ elif state == 'absent':
+ if not key_id:
+ module.fail_json(msg="key is required to remove a key", **r)
+ if fingerprint in keys:
+ r['changed'] = True
+ if not module.check_mode:
+ # we use the "short" id: key_id[-8:], short_format=True
+ # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
+ if short_key_id is not None and remove_key(module, short_key_id, keyring):
+ r['after'] = keys2 = all_keys(module, keyring, short_format)
+ if fingerprint in keys2:
+ module.fail_json(msg=error_no_error % 'the key was not removed', **r)
+ else:
+ module.fail_json(msg="error removing key_id", **r)
+
+ module.exit_json(**r)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
new file mode 100644
index 0000000..f9a0cd9
--- /dev/null
+++ b/lib/ansible/modules/apt_repository.py
@@ -0,0 +1,735 @@
+# encoding: utf-8
+
+# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
+# Copyright: (c) 2013, Alexander Saltanov <asd@mokote.com>
+# Copyright: (c) 2014, Rutger Spiertz <rutger@kumina.nl>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt_repository
+short_description: Add and remove APT repositories
+description:
+  - Add or remove an APT repository in Ubuntu and Debian.
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: debian
+notes:
+ - This module supports Debian Squeeze (version 6) as well as its successors and derivatives.
+options:
+ repo:
+ description:
+ - A source string for the repository.
+ type: str
+ required: true
+ state:
+ description:
+ - A source string state.
+ type: str
+ choices: [ absent, present ]
+ default: "present"
+ mode:
+ description:
+ - The octal mode for newly created files in sources.list.d.
+ - Default is what system uses (probably 0644).
+ type: raw
+ version_added: "1.6"
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
+ type: bool
+ default: "yes"
+ aliases: [ update-cache ]
+ update_cache_retries:
+ description:
+    - Number of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
+ type: int
+ default: 5
+ version_added: '2.10'
+ update_cache_retry_max_delay:
+ description:
+ - Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
+ type: int
+ default: 12
+ version_added: '2.10'
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ version_added: '1.8'
+ filename:
+ description:
+ - Sets the name of the source list file in sources.list.d.
+ Defaults to a file name based on the repository source url.
+ The .list extension will be automatically added.
+ type: str
+ version_added: '2.1'
+ codename:
+ description:
+ - Override the distribution codename to use for PPA repositories.
+ Should usually only be set when working with a PPA on
+ a non-Ubuntu target (for example, Debian or Mint).
+ type: str
+ version_added: '2.3'
+ install_python_apt:
+ description:
+ - Whether to automatically try to install the Python apt library or not, if it is not already installed.
+ Without this library, the module does not work.
+ - Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
+ - Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
+ the system Python, set I(install_python_apt=false) and ensure that the Python apt library
+ for your Python version is installed some other way.
+ type: bool
+ default: true
+author:
+- Alexander Saltanov (@sashka)
+version_added: "0.7"
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+ - apt-key or gpg
+'''
+
+EXAMPLES = '''
+- name: Add specified repository into sources list
+ ansible.builtin.apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: present
+
+- name: Add specified repository into sources list using specified filename
+ ansible.builtin.apt_repository:
+ repo: deb http://dl.google.com/linux/chrome/deb/ stable main
+ state: present
+ filename: google-chrome
+
+- name: Add source repository into sources list
+ ansible.builtin.apt_repository:
+ repo: deb-src http://archive.canonical.com/ubuntu hardy partner
+ state: present
+
+- name: Remove specified repository from sources list
+ ansible.builtin.apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: absent
+
+- name: Add nginx stable repository from PPA and install its signing key on Ubuntu target
+ ansible.builtin.apt_repository:
+ repo: ppa:nginx/stable
+
+- name: Add nginx stable repository from PPA and install its signing key on Debian target
+ ansible.builtin.apt_repository:
+ repo: 'ppa:nginx/stable'
+ codename: trusty
+
+- name: One way to avoid apt_key once it is removed from your distro
+ block:
+    - name: somerepo | no apt key
+ ansible.builtin.get_url:
+ url: https://download.example.com/linux/ubuntu/gpg
+ dest: /etc/apt/trusted.gpg.d/somerepo.asc
+
+ - name: somerepo | apt source
+ ansible.builtin.apt_repository:
+      repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/somerepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
+ state: present
+'''
+
+RETURN = '''#'''
+
+import copy
+import glob
+import json
+import os
+import re
+import sys
+import tempfile
+import random
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import PY3
+from ansible.module_utils.urls import fetch_url
+
+try:
+ import apt
+ import apt_pkg
+ import aptsources.distro as aptsources_distro
+
+ distro = aptsources_distro.get_distro()
+
+ HAVE_PYTHON_APT = True
+except ImportError:
+ apt = apt_pkg = aptsources_distro = distro = None
+
+ HAVE_PYTHON_APT = False
+
+APT_KEY_DIRS = ['/etc/apt/keyrings', '/etc/apt/trusted.gpg.d', '/usr/share/keyrings']
+DEFAULT_SOURCES_PERM = 0o0644
+VALID_SOURCE_TYPES = ('deb', 'deb-src')
+
+
+def install_python_apt(module, apt_pkg_name):
+
+ if not module.check_mode:
+ apt_get_path = module.get_bin_path('apt-get')
+ if apt_get_path:
+ rc, so, se = module.run_command([apt_get_path, 'update'])
+ if rc != 0:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
+ rc, so, se = module.run_command([apt_get_path, 'install', apt_pkg_name, '-y', '-q'])
+ if rc != 0:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
+ else:
+ module.fail_json(msg="%s must be installed to use check mode" % apt_pkg_name)
+
+
+class InvalidSource(Exception):
+ pass
+
+
+# Simple version of aptsources.sourceslist.SourcesList.
+# No advanced logic and no backups inside.
+class SourcesList(object):
+ def __init__(self, module):
+ self.module = module
+ self.files = {} # group sources by file
+ # Repositories that we're adding -- used to implement mode param
+ self.new_repos = set()
+ self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
+
+ # read sources.list if it exists
+ if os.path.isfile(self.default_file):
+ self.load(self.default_file)
+
+ # read sources.list.d
+ for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
+ self.load(file)
+
+ def __iter__(self):
+        '''Simple iterator to go over all sources. Empty, non-source, and otherwise invalid lines will be skipped.'''
+ for file, sources in self.files.items():
+ for n, valid, enabled, source, comment in sources:
+ if valid:
+ yield file, n, enabled, source, comment
+
+ def _expand_path(self, filename):
+ if '/' in filename:
+ return filename
+ else:
+ return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
+
+ def _suggest_filename(self, line):
+ def _cleanup_filename(s):
+ filename = self.module.params['filename']
+ if filename is not None:
+ return filename
+ return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
+
+ def _strip_username_password(s):
+ if '@' in s:
+ s = s.split('@', 1)
+ s = s[-1]
+ return s
+
+ # Drop options and protocols.
+ line = re.sub(r'\[[^\]]+\]', '', line)
+ line = re.sub(r'\w+://', '', line)
+
+ # split line into valid keywords
+ parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
+
+ # Drop usernames and passwords
+ parts[0] = _strip_username_password(parts[0])
+
+ return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
+
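+    # Illustration (assumed input, with no 'filename' module param set):
+    # _suggest_filename('deb http://dl.google.com/linux/chrome/deb/ stable main')
+    # would return 'dl_google_com_linux_chrome_deb.list'.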
+ def _parse(self, line, raise_if_invalid_or_disabled=False):
+ valid = False
+ enabled = True
+ source = ''
+ comment = ''
+
+ line = line.strip()
+ if line.startswith('#'):
+ enabled = False
+ line = line[1:]
+
+ # Check for another "#" in the line and treat a part after it as a comment.
+ i = line.find('#')
+ if i > 0:
+ comment = line[i + 1:].strip()
+ line = line[:i]
+
+        # Split the source into substrings to make sure that it is a valid source spec.
+        # Duplicated whitespace in a valid source spec will be removed.
+ source = line.strip()
+ if source:
+ chunks = source.split()
+ if chunks[0] in VALID_SOURCE_TYPES:
+ valid = True
+ source = ' '.join(chunks)
+
+ if raise_if_invalid_or_disabled and (not valid or not enabled):
+ raise InvalidSource(line)
+
+ return valid, enabled, source, comment
+
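+    # Illustration (assumed input): _parse() applied to
+    #   '# deb http://archive.canonical.com/ubuntu hardy partner # disabled by admin'
+    # yields valid=True, enabled=False,
+    # source='deb http://archive.canonical.com/ubuntu hardy partner', comment='disabled by admin'.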
+ @staticmethod
+ def _apt_cfg_file(filespec):
+ '''
+ Wrapper for `apt_pkg` module for running with Python 2.5
+ '''
+ try:
+ result = apt_pkg.config.find_file(filespec)
+ except AttributeError:
+ result = apt_pkg.Config.FindFile(filespec)
+ return result
+
+ @staticmethod
+ def _apt_cfg_dir(dirspec):
+ '''
+ Wrapper for `apt_pkg` module for running with Python 2.5
+ '''
+ try:
+ result = apt_pkg.config.find_dir(dirspec)
+ except AttributeError:
+ result = apt_pkg.Config.FindDir(dirspec)
+ return result
+
+ def load(self, file):
+ group = []
+ f = open(file, 'r')
+ for n, line in enumerate(f):
+ valid, enabled, source, comment = self._parse(line)
+ group.append((n, valid, enabled, source, comment))
+ self.files[file] = group
+
+ def save(self):
+ for filename, sources in list(self.files.items()):
+ if sources:
+ d, fn = os.path.split(filename)
+ try:
+ os.makedirs(d)
+ except OSError as ex:
+ if not os.path.isdir(d):
+ self.module.fail_json("Failed to create directory %s: %s" % (d, to_native(ex)))
+
+ try:
+ fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
+ except (OSError, IOError) as e:
+ self.module.fail_json(msg='Unable to create temp file at "%s" for apt source: %s' % (d, to_native(e)))
+
+ f = os.fdopen(fd, 'w')
+ for n, valid, enabled, source, comment in sources:
+ chunks = []
+ if not enabled:
+ chunks.append('# ')
+ chunks.append(source)
+ if comment:
+ chunks.append(' # ')
+ chunks.append(comment)
+ chunks.append('\n')
+ line = ''.join(chunks)
+
+ try:
+ f.write(line)
+ except IOError as ex:
+ self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(ex)))
+ self.module.atomic_move(tmp_path, filename)
+
+ # allow the user to override the default mode
+ if filename in self.new_repos:
+ this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
+ self.module.set_mode_if_different(filename, this_mode, False)
+ else:
+ del self.files[filename]
+ if os.path.exists(filename):
+ os.remove(filename)
+
+ def dump(self):
+ dumpstruct = {}
+ for filename, sources in self.files.items():
+ if sources:
+ lines = []
+ for n, valid, enabled, source, comment in sources:
+ chunks = []
+ if not enabled:
+ chunks.append('# ')
+ chunks.append(source)
+ if comment:
+ chunks.append(' # ')
+ chunks.append(comment)
+ chunks.append('\n')
+ lines.append(''.join(chunks))
+ dumpstruct[filename] = ''.join(lines)
+ return dumpstruct
+
+ def _choice(self, new, old):
+ if new is None:
+ return old
+ return new
+
+ def modify(self, file, n, enabled=None, source=None, comment=None):
+ '''
+        This function is intended to be used with the iterator, so we don't care about invalid sources.
+        If source, enabled, or comment is None, the original value from line ``n`` will be preserved.
+ '''
+ valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
+ self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
+
+ def _add_valid_source(self, source_new, comment_new, file):
+        # We'll try to reuse a disabled source if we have one.
+ # If we have more than one entry, we will enable them all - no advanced logic, remember.
+        self.module.log('adding source file: %s | %s | %s' % (source_new, comment_new, file))
+ found = False
+ for filename, n, enabled, source, comment in self:
+ if source == source_new:
+ self.modify(filename, n, enabled=True)
+ found = True
+
+ if not found:
+ if file is None:
+ file = self.default_file
+ else:
+ file = self._expand_path(file)
+
+ if file not in self.files:
+ self.files[file] = []
+
+ files = self.files[file]
+ files.append((len(files), True, True, source_new, comment_new))
+ self.new_repos.add(file)
+
+ def add_source(self, line, comment='', file=None):
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+
+ # Prefer separate files for new sources.
+ self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
+
+ def _remove_valid_source(self, source):
+ # If we have more than one entry, we will remove them all (not comment, remove!)
+ for filename, n, enabled, src, comment in self:
+ if source == src and enabled:
+ self.files[filename].pop(n)
+
+ def remove_source(self, line):
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ self._remove_valid_source(source)
+
+
+class UbuntuSourcesList(SourcesList):
+
+ LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
+
+ def __init__(self, module):
+ self.module = module
+ self.codename = module.params['codename'] or distro.codename
+ super(UbuntuSourcesList, self).__init__(module)
+
+ self.apt_key_bin = self.module.get_bin_path('apt-key', required=False)
+ self.gpg_bin = self.module.get_bin_path('gpg', required=False)
+ if not self.apt_key_bin and not self.gpg_bin:
+ self.module.fail_json(msg='Either apt-key or gpg binary is required, but neither could be found')
+
+ def __deepcopy__(self, memo=None):
+ return UbuntuSourcesList(self.module)
+
+ def _get_ppa_info(self, owner_name, ppa_name):
+ lp_api = self.LP_API % (owner_name, ppa_name)
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(self.module, lp_api, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
+ return json.loads(to_native(response.read()))
+
+ def _expand_ppa(self, path):
+ ppa = path.split(':')[1]
+ ppa_owner = ppa.split('/')[0]
+ try:
+ ppa_name = ppa.split('/')[1]
+ except IndexError:
+ ppa_name = 'ppa'
+
+ line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
+ return line, ppa_owner, ppa_name
+
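+    # Illustration (codename 'jammy' is an assumption): _expand_ppa('ppa:nginx/stable')
+    # returns ('deb http://ppa.launchpad.net/nginx/stable/ubuntu jammy main', 'nginx', 'stable').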
+ def _key_already_exists(self, key_fingerprint):
+
+ if self.apt_key_bin:
+ rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True)
+ found = len(err) == 0
+ else:
+ found = self._gpg_key_exists(key_fingerprint)
+
+ return found
+
+ def _gpg_key_exists(self, key_fingerprint):
+
+ found = False
+ keyfiles = ['/etc/apt/trusted.gpg'] # main gpg repo for apt
+ for other_dir in APT_KEY_DIRS:
+ # add other known sources of gpg sigs for apt, skip hidden files
+ keyfiles.extend([os.path.join(other_dir, x) for x in os.listdir(other_dir) if not x.startswith('.')])
+
+ for key_file in keyfiles:
+
+ if os.path.exists(key_file):
+ try:
+ rc, out, err = self.module.run_command([self.gpg_bin, '--list-packets', key_file])
+ except (IOError, OSError) as e:
+                    self.module.debug("Could not check key against file %s: %s" % (key_file, to_native(e)))
+ continue
+
+ if key_fingerprint in out:
+ found = True
+ break
+
+ return found
+
+ # https://www.linuxuprising.com/2021/01/apt-key-is-deprecated-how-to-add.html
+ def add_source(self, line, comment='', file=None):
+ if line.startswith('ppa:'):
+ source, ppa_owner, ppa_name = self._expand_ppa(line)
+
+ if source in self.repos_urls:
+ # repository already exists
+ return
+
+ info = self._get_ppa_info(ppa_owner, ppa_name)
+
+ # add gpg sig if needed
+ if not self._key_already_exists(info['signing_key_fingerprint']):
+
+ # TODO: report file that would have been added if not check_mode
+ keyfile = ''
+ if not self.module.check_mode:
+ if self.apt_key_bin:
+ command = [self.apt_key_bin, 'adv', '--recv-keys', '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80',
+ info['signing_key_fingerprint']]
+ else:
+ # use first available key dir, in order of preference
+ for keydir in APT_KEY_DIRS:
+ if os.path.exists(keydir):
+ break
+ else:
+                        self.module.fail_json(msg="Unable to find any existing apt gpg repo directories, tried the following: %s" % ', '.join(APT_KEY_DIRS))
+
+ keyfile = '%s/%s-%s-%s.gpg' % (keydir, os.path.basename(source).replace(' ', '-'), ppa_owner, ppa_name)
+ command = [self.gpg_bin, '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--export', info['signing_key_fingerprint']]
+
+ rc, stdout, stderr = self.module.run_command(command, check_rc=True, encoding=None)
+ if keyfile:
+ # using gpg we must write keyfile ourselves
+ if len(stdout) == 0:
+ self.module.fail_json(msg='Unable to get required signing key', rc=rc, stderr=stderr, command=command)
+ try:
+ with open(keyfile, 'wb') as f:
+ f.write(stdout)
+ self.module.log('Added repo key "%s" for apt to file "%s"' % (info['signing_key_fingerprint'], keyfile))
+ except (OSError, IOError) as e:
+                        self.module.fail_json(msg='Unable to write required signing key to %s' % keyfile, rc=rc, stderr=stderr, error=to_native(e))
+
+ # apt source file
+ file = file or self._suggest_filename('%s_%s' % (line, self.codename))
+ else:
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ file = file or self._suggest_filename(source)
+
+ self._add_valid_source(source, comment, file)
+
+ def remove_source(self, line):
+ if line.startswith('ppa:'):
+ source = self._expand_ppa(line)[0]
+ else:
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ self._remove_valid_source(source)
+
+ @property
+ def repos_urls(self):
+ _repositories = []
+ for parsed_repos in self.files.values():
+ for parsed_repo in parsed_repos:
+ valid = parsed_repo[1]
+ enabled = parsed_repo[2]
+ source_line = parsed_repo[3]
+
+ if not valid or not enabled:
+ continue
+
+ if source_line.startswith('ppa:'):
+ source, ppa_owner, ppa_name = self._expand_ppa(source_line)
+ _repositories.append(source)
+ else:
+ _repositories.append(source_line)
+
+ return _repositories
+
+
+def revert_sources_list(sources_before, sources_after, sourceslist_before):
+ '''Revert the sourcelist files to their previous state.'''
+
+ # First remove any new files that were created:
+ for filename in set(sources_after.keys()).difference(sources_before.keys()):
+ if os.path.exists(filename):
+ os.remove(filename)
+ # Now revert the existing files to their former state:
+ sourceslist_before.save()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ mode=dict(type='raw'),
+ update_cache=dict(type='bool', default=True, aliases=['update-cache']),
+ update_cache_retries=dict(type='int', default=5),
+ update_cache_retry_max_delay=dict(type='int', default=12),
+ filename=dict(type='str'),
+ # This should not be needed, but exists as a failsafe
+ install_python_apt=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ codename=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ repo = module.params['repo']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+ # Note: mode is referenced in SourcesList class via the passed in module (self here)
+
+ sourceslist = None
+
+ if not HAVE_PYTHON_APT:
+ # This interpreter can't see the apt Python library- we'll do the following to try and fix that:
+ # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
+ # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+ # we limit to the current interpreter version to try and avoid installing a whole other Python just
+ # for apt support
+ # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
+ # the current interpreter again, but we'll let it respawn anyway for simplicity)
+ # 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
+ # made any more complex than it already is to try and cover more, eg, custom interpreters taking over
+ # system locations)
+
+ apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
+
+ if has_respawned():
+ # this shouldn't be possible; short-circuit early if it happens...
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # don't make changes if we're in check_mode
+ if module.check_mode:
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % apt_pkg_name)
+
+ if params['install_python_apt']:
+ install_python_apt(module, apt_pkg_name)
+ else:
+ module.fail_json(msg='%s is not installed, and install_python_apt is False' % apt_pkg_name)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+ else:
+ # we've done all we can do; just tell the user it's busted and get out
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ if not repo:
+ module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
+
+ if isinstance(distro, aptsources_distro.Distribution):
+ sourceslist = UbuntuSourcesList(module)
+ else:
+ module.fail_json(msg='Module apt_repository is not supported on target.')
+
+ sourceslist_before = copy.deepcopy(sourceslist)
+ sources_before = sourceslist.dump()
+
+ try:
+ if state == 'present':
+ sourceslist.add_source(repo)
+ elif state == 'absent':
+ sourceslist.remove_source(repo)
+ except InvalidSource as ex:
+ module.fail_json(msg='Invalid repository string: %s' % to_native(ex))
+
+ sources_after = sourceslist.dump()
+ changed = sources_before != sources_after
+
+ if changed and module._diff:
+ diff = []
+ for filename in set(sources_before.keys()).union(sources_after.keys()):
+ diff.append({'before': sources_before.get(filename, ''),
+ 'after': sources_after.get(filename, ''),
+ 'before_header': (filename, '/dev/null')[filename not in sources_before],
+ 'after_header': (filename, '/dev/null')[filename not in sources_after]})
+ else:
+ diff = {}
+
+ if changed and not module.check_mode:
+ try:
+ sourceslist.save()
+ if update_cache:
+ err = ''
+ update_cache_retries = module.params.get('update_cache_retries')
+ update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
+ randomize = random.randint(0, 1000) / 1000.0
+
+ for retry in range(update_cache_retries):
+ try:
+ cache = apt.Cache()
+ cache.update()
+ break
+ except apt.cache.FetchFailedException as e:
+ err = to_native(e)
+
+ # Use exponential backoff with a max fail count, plus a little bit of randomness
+ delay = 2 ** retry + randomize
+ if delay > update_cache_retry_max_delay:
+ delay = update_cache_retry_max_delay + randomize
+ time.sleep(delay)
+ else:
+ revert_sources_list(sources_before, sources_after, sourceslist_before)
+ module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+
+ except (OSError, IOError) as ex:
+ revert_sources_list(sources_before, sources_after, sourceslist_before)
+ module.fail_json(msg=to_native(ex))
+
+ module.exit_json(changed=changed, repo=repo, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py
new file mode 100644
index 0000000..2b443ce
--- /dev/null
+++ b/lib/ansible/modules/assemble.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: assemble
+short_description: Assemble configuration files from fragments
+description:
+- Assembles a configuration file from fragments.
+- Often a particular program takes a single configuration file and does not support a
+ C(conf.d) style structure where it is easy to build up the configuration
+ from multiple sources. C(assemble) will take a directory of files that can be
+ local or have already been transferred to the system, and concatenate them
+ together to produce a destination file.
+- Files are assembled in string sorting order.
+- Puppet calls this idea I(fragments).
+version_added: '0.5'
+options:
+ src:
+ description:
+ - An already existing directory full of source files.
+ type: path
+ required: true
+ dest:
+ description:
+ - A file to create using the concatenation of all of the source files.
+ type: path
+ required: true
+ backup:
+ description:
+ - Create a backup file (if C(true)), including the timestamp information so
+ you can get the original file back if you somehow clobbered it
+ incorrectly.
+ type: bool
+ default: no
+ delimiter:
+ description:
+ - A delimiter to separate the file contents.
+ type: str
+ version_added: '1.4'
+ remote_src:
+ description:
+    - If C(false), it will search for C(src) on the originating/controller machine.
+ - If C(true), it will go to the remote/target machine for the src.
+ type: bool
+ default: yes
+ version_added: '1.4'
+ regexp:
+ description:
+    - Assemble files only if C(regexp) matches the filename.
+ - If not set, all files are assembled.
+    - Every C(\) (backslash) must be escaped as C(\\) to comply with YAML syntax.
+ - Uses L(Python regular expressions,https://docs.python.org/3/library/re.html).
+ type: str
+ ignore_hidden:
+ description:
+ - A boolean that controls if files that start with a '.' will be included or not.
+ type: bool
+ default: no
+ version_added: '2.0'
+ validate:
+ description:
+ - The validation command to run before copying into place.
+    - The path to the file to validate is passed in via '%s', which must be present, as in the sshd example below.
+ - The command is passed securely so shell features like expansion and pipes won't work.
+ type: str
+ version_added: '2.0'
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: full
+ version_added: '2.2'
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.builtin.template
+- module: ansible.windows.win_copy
+author:
+- Stephen Fromm (@sfromm)
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+ - action_common_attributes.files
+ - decrypt
+ - files
+'''
+
+EXAMPLES = r'''
+- name: Assemble from fragments from a directory
+ ansible.builtin.assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+
+- name: Insert the provided delimiter between fragments
+ ansible.builtin.assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+ delimiter: '### START FRAGMENT ###'
+
+- name: Assemble a new "sshd_config" file into place, after passing validation with sshd
+ ansible.builtin.assemble:
+ src: /etc/ssh/conf.d/
+ dest: /etc/ssh/sshd_config
+ validate: /usr/sbin/sshd -t -f %s
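+
+# Illustrative addition, not from the original docs: restrict assembly to
+# fragments whose names match the regexp option.
+- name: Assemble only fragments whose names end in ".conf"
+  ansible.builtin.assemble:
+    src: /etc/someapp/fragments
+    dest: /etc/someapp/someapp.conf
+    regexp: "\\.conf$"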
+'''
+
+RETURN = r'''#'''
+
+import codecs
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import b, indexbytes
+from ansible.module_utils._text import to_native
+
+
+def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
+ ''' assemble a file from a directory of fragments '''
+ tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
+
+ for f in sorted(os.listdir(src_path)):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = os.path.join(src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+ with open(fragment, 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b('\n'))
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+
+ # byte indexing differs on Python 2 and 3,
+ # use indexbytes for compat
+ # chr(10) == '\n'
+ if indexbytes(delimiter, -1) != 10:
+ tmp.write(b('\n'))
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b('\n')):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
+
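+# Illustration (assumed fragments): given files '00-base' containing "a\n" and
+# '10-extra' containing "b\n", assembling with delimiter '# ---' yields
+# "a\n# ---\nb\n"; the delimiter is only written between fragments.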
+
+def cleanup(path, result=None):
+ # cleanup just in case
+ if os.path.exists(path):
+ try:
+ os.remove(path)
+ except (IOError, OSError) as e:
+ # don't error on possible race conditions, but keep warning
+ if result is not None:
+ result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
+
+
+def main():
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ delimiter=dict(type='str'),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ remote_src=dict(type='bool', default=True),
+ regexp=dict(type='str'),
+ ignore_hidden=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ add_file_common_args=True,
+ )
+
+ changed = False
+ path_hash = None
+ dest_hash = None
+ src = module.params['src']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ delimiter = module.params['delimiter']
+ regexp = module.params['regexp']
+ compiled_regexp = None
+ ignore_hidden = module.params['ignore_hidden']
+ validate = module.params.get('validate', None)
+
+ result = dict(src=src, dest=dest)
+ if not os.path.exists(src):
+ module.fail_json(msg="Source (%s) does not exist" % src)
+
+ if not os.path.isdir(src):
+ module.fail_json(msg="Source (%s) is not a directory" % src)
+
+ if regexp is not None:
+ try:
+ compiled_regexp = re.compile(regexp)
+ except re.error as e:
+ module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
+
+ if validate and "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % validate)
+
+ path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
+ path_hash = module.sha1(path)
+ result['checksum'] = path_hash
+
+ # Backwards compat. This won't return data if FIPS mode is active
+ try:
+ pathmd5 = module.md5(path)
+ except ValueError:
+ pathmd5 = None
+ result['md5sum'] = pathmd5
+
+ if os.path.exists(dest):
+ dest_hash = module.sha1(dest)
+
+ if path_hash != dest_hash:
+ if validate:
+ (rc, out, err) = module.run_command(validate % path)
+ result['validation'] = dict(rc=rc, stdout=out, stderr=err)
+ if rc != 0:
+ cleanup(path)
+ module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
+ if backup and dest_hash is not None:
+ result['backup_file'] = module.backup_local(dest)
+
+ module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
+ changed = True
+
+ cleanup(path, result)
+
+ # handle file permissions
+ file_args = module.load_file_common_arguments(module.params)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Mission complete
+ result['msg'] = "OK"
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/assert.py b/lib/ansible/modules/assert.py
new file mode 100644
index 0000000..0ef5eb0
--- /dev/null
+++ b/lib/ansible/modules/assert.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: assert
+short_description: Asserts given expressions are true
+description:
+ - This module asserts that given expressions are true with an optional custom message.
+ - This module is also supported for Windows targets.
+version_added: "1.5"
+options:
+ that:
+ description:
+ - A list of string expressions of the same form that can be passed to the 'when' statement.
+ type: list
+ elements: str
+ required: true
+ fail_msg:
+ description:
+ - The customized message used for a failing assertion.
+    - This argument was called 'msg' before Ansible 2.7; it was renamed to 'fail_msg', with 'msg' kept as an alias.
+ type: str
+ aliases: [ msg ]
+ version_added: "2.7"
+ success_msg:
+ description:
+ - The customized message used for a successful assertion.
+ type: str
+ version_added: "2.7"
+ quiet:
+ description:
+ - Set this to C(true) to avoid verbose output.
+ type: bool
+ default: no
+ version_added: "2.8"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: none
+ connection:
+ support: none
+ check_mode:
+ support: full
+ delegation:
+ support: none
+ details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+seealso:
+- module: ansible.builtin.debug
+- module: ansible.builtin.fail
+- module: ansible.builtin.meta
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- ansible.builtin.assert: { that: "ansible_os_family != 'RedHat'" }
+
+- ansible.builtin.assert:
+ that:
+ - "'foo' in some_command_result.stdout"
+ - number_of_the_counting == 3
+
+- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
+ ansible.builtin.assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ fail_msg: "'my_param' must be between 0 and 100"
+ success_msg: "'my_param' is between 0 and 100"
+
+- name: Use 'msg' when the Ansible version is older than 2.7
+ ansible.builtin.assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ msg: "'my_param' must be between 0 and 100"
+
+- name: Use quiet to avoid verbose output
+ ansible.builtin.assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ quiet: true
+'''
diff --git a/lib/ansible/modules/async_status.py b/lib/ansible/modules/async_status.py
new file mode 100644
index 0000000..3609c46
--- /dev/null
+++ b/lib/ansible/modules/async_status.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: async_status
+short_description: Obtain status of asynchronous task
+description:
+- This module gets the status of an asynchronous task.
+- This module is also supported for Windows targets.
+version_added: "0.5"
+options:
+ jid:
+ description:
+ - Job or task identifier
+ type: str
+ required: true
+ mode:
+ description:
+ - If C(status), obtain the status.
+ - If C(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job I(jid).
+ type: str
+ choices: [ cleanup, status ]
+ default: status
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ bypass_host_loop:
+ support: none
+ platform:
+ support: full
+ platforms: posix, windows
+seealso:
+- ref: playbooks_async
+ description: Detailed information on how to use asynchronous actions and polling.
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+---
+- name: Asynchronous yum task
+ ansible.builtin.yum:
+ name: docker-io
+ state: present
+ async: 1000
+ poll: 0
+ register: yum_sleeper
+
+- name: Wait for asynchronous job to end
+ ansible.builtin.async_status:
+ jid: '{{ yum_sleeper.ansible_job_id }}'
+ register: job_result
+ until: job_result.finished
+ retries: 100
+ delay: 10
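+
+# An illustrative follow-up (not in the original examples): remove the job
+# cache entry once the job has finished, using the documented cleanup mode
+- name: Clean up the async job cache entry
+  ansible.builtin.async_status:
+    jid: '{{ yum_sleeper.ansible_job_id }}'
+    mode: cleanup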
+'''
+
+RETURN = r'''
+ansible_job_id:
+ description: The asynchronous job id
+ returned: success
+ type: str
+ sample: '360874038559.4169'
+finished:
+ description: Whether the asynchronous job has finished (C(1)) or not (C(0))
+ returned: always
+ type: int
+ sample: 1
+started:
+ description: Whether the asynchronous job has started (C(1)) or not (C(0))
+ returned: always
+ type: int
+ sample: 1
+stdout:
+ description: Any output returned by async_wrapper
+ returned: always
+ type: str
+stderr:
+ description: Any errors returned by async_wrapper
+ returned: always
+ type: str
+erased:
+ description: Path to erased job file
+ returned: when file is erased
+ type: str
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(argument_spec=dict(
+ jid=dict(type='str', required=True),
+ mode=dict(type='str', default='status', choices=['cleanup', 'status']),
+ # passed in from the async_status action plugin
+ _async_dir=dict(type='path', required=True),
+ ))
+
+ mode = module.params['mode']
+ jid = module.params['jid']
+ async_dir = module.params['_async_dir']
+
+ # setup logging directory
+ logdir = os.path.expanduser(async_dir)
+ log_path = os.path.join(logdir, jid)
+
+ if not os.path.exists(log_path):
+ module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
+
+ if mode == 'cleanup':
+ os.unlink(log_path)
+ module.exit_json(ansible_job_id=jid, erased=log_path)
+
+ # NOT in cleanup mode, assume regular status mode
+    # no remote kill mode currently exists, but one probably should;
+    # consider a log_path + ".pid" file and also unlink that above
+
+    data = None
+    try:
+        with open(log_path) as f:
+            data = f.read()
+        data = json.loads(data)
+    except Exception:
+        if not data:
+            # file not written yet or empty? That means it is still running
+            module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
+        else:
+            # the file was readable but its contents could not be parsed as JSON
+            module.fail_json(ansible_job_id=jid, results_file=log_path,
+                             msg="Could not parse job output: %s" % data, started=1, finished=1)
+
+ if 'started' not in data:
+ data['finished'] = 1
+ data['ansible_job_id'] = jid
+ elif 'finished' not in data:
+ data['finished'] = 0
+
+ # Fix error: TypeError: exit_json() keywords must be strings
+ data = {to_native(k): v for k, v in iteritems(data)}
+
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/async_wrapper.py b/lib/ansible/modules/async_wrapper.py
new file mode 100644
index 0000000..4b1a5b3
--- /dev/null
+++ b/lib/ansible/modules/async_wrapper.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import errno
+import json
+import shlex
+import shutil
+import os
+import subprocess
+import sys
+import traceback
+import signal
+import time
+import syslog
+import multiprocessing
+
+from ansible.module_utils._text import to_text, to_bytes
+
+PY3 = sys.version_info[0] == 3
+
+syslog.openlog('ansible-%s' % os.path.basename(__file__))
+syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
+
+# pipe for communication between forked process and parent
+ipc_watcher, ipc_notifier = multiprocessing.Pipe()
+
+job_path = ''
+
+
+def notice(msg):
+ syslog.syslog(syslog.LOG_NOTICE, msg)
+
+
+def end(res=None, exit_msg=0):
+ if res is not None:
+ print(json.dumps(res))
+ sys.stdout.flush()
+ sys.exit(exit_msg)
+
+
+def daemonize_self():
+ # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
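+    # the double fork detaches us from the controlling terminal: the first
+    # fork lets the original parent exit, setsid() makes this process a
+    # session leader, and the second fork below ensures the daemon can never
+    # reacquire a controlling terminal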
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # exit first parent
+ end()
+ except OSError:
+ e = sys.exc_info()[1]
+ end({'msg': "fork #1 failed: %d (%s)\n" % (e.errno, e.strerror), 'failed': True}, 1)
+
+ # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
+ os.setsid()
+ os.umask(int('022', 8))
+
+ # do second fork
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # TODO: print 'async_wrapper_pid': pid, but careful as it will pollute expected output.
+ end()
+ except OSError:
+ e = sys.exc_info()[1]
+ end({'msg': "fork #2 failed: %d (%s)\n" % (e.errno, e.strerror), 'failed': True}, 1)
+
+ dev_null = open('/dev/null', 'w')
+ os.dup2(dev_null.fileno(), sys.stdin.fileno())
+ os.dup2(dev_null.fileno(), sys.stdout.fileno())
+ os.dup2(dev_null.fileno(), sys.stderr.fileno())
+
+
+# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
+# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
+def _filter_non_json_lines(data):
+ '''
+ Used to filter unrelated output around module JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+    Filters leading lines before the first line-starting occurrence of '{', and filters all
+    trailing lines after the matching close character (working from the bottom of the output).
+ '''
+ warnings = []
+
+ # Filter initial junk
+ lines = data.splitlines()
+
+ for start, line in enumerate(lines):
+ line = line.strip()
+ if line.startswith(u'{'):
+ break
+ else:
+ raise ValueError('No start of json char found')
+
+ # Filter trailing junk
+ lines = lines[start:]
+
+ for reverse_end_offset, line in enumerate(reversed(lines)):
+ if line.strip().endswith(u'}'):
+ break
+ else:
+ raise ValueError('No end of json char found')
+
+ if reverse_end_offset > 0:
+ # Trailing junk is uncommon and can point to things the user might
+ # want to change. So print a warning if we find any
+ trailing_junk = lines[len(lines) - reverse_end_offset:]
+ warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+
+ lines = lines[:(len(lines) - reverse_end_offset)]
+
+ return ('\n'.join(lines), warnings)
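+
+# illustration only, given the rules above:
+#   _filter_non_json_lines('banner\n{"rc": 0}\njunk')
+#   -> ('{"rc": 0}', ['Module invocation had junk after the JSON data: junk'])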
+
+
+def _get_interpreter(module_path):
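+    # e.g. a module file beginning with b'#!/usr/bin/python -tt\n' yields
+    # [b'/usr/bin/python', b'-tt']; None is returned when there is no shebang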
+ with open(module_path, 'rb') as module_fd:
+ head = module_fd.read(1024)
+ if head[0:2] != b'#!':
+ return None
+ return head[2:head.index(b'\n')].strip().split(b' ')
+
+
+def _make_temp_dir(path):
+ # TODO: Add checks for permissions on path.
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def jwrite(info):
+
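+    # write to a temp file and rename() it over job_path so readers such as
+    # async_status never observe a partially written status file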
+ jobfile = job_path + ".tmp"
+ tjob = open(jobfile, "w")
+ try:
+ tjob.write(json.dumps(info))
+ except (IOError, OSError) as e:
+ notice('failed to write to %s: %s' % (jobfile, str(e)))
+ raise e
+ finally:
+ tjob.close()
+ os.rename(jobfile, job_path)
+
+
+def _run_module(wrapped_cmd, jid):
+
+ jwrite({"started": 1, "finished": 0, "ansible_job_id": jid})
+
+ result = {}
+
+ # signal grandchild process started and isolated from being terminated
+ # by the connection being closed sending a signal to the job group
+ ipc_notifier.send(True)
+ ipc_notifier.close()
+
+ outdata = ''
+ filtered_outdata = ''
+ stderr = ''
+ try:
+ cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(wrapped_cmd)]
+ # call the module interpreter directly (for non-binary modules)
+ # this permits use of a script for an interpreter on non-Linux platforms
+ interpreter = _get_interpreter(cmd[0])
+ if interpreter:
+ cmd = interpreter + cmd
+ script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ (outdata, stderr) = script.communicate()
+ if PY3:
+ outdata = outdata.decode('utf-8', 'surrogateescape')
+ stderr = stderr.decode('utf-8', 'surrogateescape')
+
+ (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
+
+ result = json.loads(filtered_outdata)
+
+ if json_warnings:
+ # merge JSON junk warnings with any existing module warnings
+ module_warnings = result.get('warnings', [])
+ if not isinstance(module_warnings, list):
+ module_warnings = [module_warnings]
+ module_warnings.extend(json_warnings)
+ result['warnings'] = module_warnings
+
+ if stderr:
+ result['stderr'] = stderr
+ jwrite(result)
+
+ except (OSError, IOError):
+ e = sys.exc_info()[1]
+ result = {
+ "failed": 1,
+ "cmd": wrapped_cmd,
+ "msg": to_text(e),
+ "outdata": outdata, # temporary notice only
+ "stderr": stderr
+ }
+ result['ansible_job_id'] = jid
+ jwrite(result)
+
+    except Exception:  # ValueError from json.loads is already a subclass of Exception
+ result = {
+ "failed": 1,
+ "cmd": wrapped_cmd,
+ "data": outdata, # temporary notice only
+ "stderr": stderr,
+ "msg": traceback.format_exc()
+ }
+ result['ansible_job_id'] = jid
+ jwrite(result)
+
+
+def main():
+ if len(sys.argv) < 5:
+ end({
+ "failed": True,
+ "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
+ "Humans, do not call directly!"
+ }, 1)
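+
+    # illustrative invocation (normally constructed by the async action plugin,
+    # shown here with placeholder values):
+    #   async_wrapper.py 360874038559 300 /tmp/ansible-tmp-1/wrapped_module.py _
+    # the jid assignment below appends this process's pid to the first argument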
+
+ jid = "%s.%d" % (sys.argv[1], os.getpid())
+ time_limit = sys.argv[2]
+ wrapped_module = sys.argv[3]
+ argsfile = sys.argv[4]
+ if '-tmp-' not in os.path.dirname(wrapped_module):
+ preserve_tmp = True
+ elif len(sys.argv) > 5:
+ preserve_tmp = sys.argv[5] == '-preserve_tmp'
+ else:
+ preserve_tmp = False
+ # consider underscore as no argsfile so we can support passing of additional positional parameters
+ if argsfile != '_':
+ cmd = "%s %s" % (wrapped_module, argsfile)
+ else:
+ cmd = wrapped_module
+ step = 5
+
+ async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
+
+ # setup job output directory
+ jobdir = os.path.expanduser(async_dir)
+ global job_path
+ job_path = os.path.join(jobdir, jid)
+
+ try:
+ _make_temp_dir(jobdir)
+ except Exception as e:
+ end({
+ "failed": 1,
+ "msg": "could not create directory: %s - %s" % (jobdir, to_text(e)),
+ "exception": to_text(traceback.format_exc()),
+ }, 1)
+
+ # immediately exit this process, leaving an orphaned process
+ # running which immediately forks a supervisory timing process
+
+ try:
+ pid = os.fork()
+ if pid:
+ # Notify the overlord that the async process started
+
+ # we need to not return immediately such that the launched command has an attempt
+ # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
+ # this probably could be done with some IPC later. Modules should always read
+ # the argsfile at the very first start of their execution anyway
+
+ # close off notifier handle in grandparent, probably unnecessary as
+ # this process doesn't hang around long enough
+ ipc_notifier.close()
+
+            # allow waiting up to 2.5 seconds in total; that should be long enough for
+            # the most heavily loaded environment in practice.
+ retries = 25
+ while retries > 0:
+ if ipc_watcher.poll(0.1):
+ break
+ else:
+ retries = retries - 1
+ continue
+
+ notice("Return async_wrapper task started.")
+ end({"failed": 0, "started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
+ "_ansible_suppress_tmpdir_delete": (not preserve_tmp)}, 0)
+ else:
+ # The actual wrapper process
+
+ # close off the receiving end of the pipe from child process
+ ipc_watcher.close()
+
+ # Daemonize, so we keep on running
+ daemonize_self()
+
+ # we are now daemonized, create a supervisory process
+ notice("Starting module and watcher")
+
+ sub_pid = os.fork()
+ if sub_pid:
+ # close off inherited pipe handles
+ ipc_watcher.close()
+ ipc_notifier.close()
+
+ # the parent stops the process after the time limit
+ remaining = int(time_limit)
+
+ # set the child process group id to kill all children
+ os.setpgid(sub_pid, sub_pid)
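+            # os.killpg() in the timeout branch below targets this group, so the
+            # wrapped module and any children it spawned are killed together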
+
+ notice("Start watching %s (%s)" % (sub_pid, remaining))
+ time.sleep(step)
+ while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
+ notice("%s still running (%s)" % (sub_pid, remaining))
+ time.sleep(step)
+ remaining = remaining - step
+ if remaining <= 0:
+ # ensure we leave response in poll location
+ res = {'msg': 'Timeout exceeded', 'failed': True, 'child_pid': sub_pid}
+ jwrite(res)
+
+ # actually kill it
+ notice("Timeout reached, now killing %s" % (sub_pid))
+ os.killpg(sub_pid, signal.SIGKILL)
+ notice("Sent kill to group %s " % sub_pid)
+ time.sleep(1)
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
+ end(res)
+ notice("Done in kid B.")
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
+ end()
+ else:
+ # the child process runs the actual module
+ notice("Start module (%s)" % os.getpid())
+ _run_module(cmd, jid)
+ notice("Module complete (%s)" % os.getpid())
+
+ except Exception as e:
+ notice("error: %s" % e)
+ end({"failed": True, "msg": "FATAL ERROR: %s" % e}, "async_wrapper exited prematurely")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py
new file mode 100644
index 0000000..63fc021
--- /dev/null
+++ b/lib/ansible/modules/blockinfile.py
@@ -0,0 +1,387 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: blockinfile
+short_description: Insert/update/remove a text block surrounded by marker lines
+version_added: '2.0'
+description:
+- This module will insert/update/remove a block of multi-line text surrounded by customizable marker lines.
+author:
+- Yaegashi Takeshi (@yaegashi)
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: yes
+ aliases: [ dest, destfile, name ]
+ state:
+ description:
+ - Whether the block should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ marker:
+ description:
+ - The marker line template.
+ - C({mark}) will be replaced with the values in C(marker_begin) (default="BEGIN") and C(marker_end) (default="END").
+ - Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs.
+ - Multi-line markers are not supported and will result in the block being repeatedly inserted on subsequent playbook runs.
+ - A newline is automatically appended by the module to C(marker_begin) and C(marker_end).
+ type: str
+ default: '# {mark} ANSIBLE MANAGED BLOCK'
+ block:
+ description:
+ - The text to insert inside the marker lines.
+ - If it is missing or an empty string, the block will be removed as if C(state) were specified to C(absent).
+ type: str
+ default: ''
+ aliases: [ content ]
+ insertafter:
+ description:
+ - If specified and no begin/ending C(marker) lines are found, the block will be inserted after the last match of specified regular expression.
+    - A special value is available: C(EOF) for inserting the block at the end of the file.
+ - If specified regular expression has no matches, C(EOF) will be used instead.
+ - The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
+ This behaviour was added in ansible-core 2.14.
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+ - If specified and no begin/ending C(marker) lines are found, the block will be inserted before the last match of specified regular expression.
+    - A special value is available: C(BOF) for inserting the block at the beginning of the file.
+ - If specified regular expression has no matches, the block will be inserted at the end of the file.
+ - The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
+ This behaviour was added in ansible-core 2.14.
+ type: str
+ choices: [ BOF, '*regex*' ]
+ create:
+ description:
+ - Create a new file if it does not exist.
+ type: bool
+ default: no
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ marker_begin:
+ description:
+ - This will be inserted at C({mark}) in the opening ansible block marker.
+ type: str
+ default: BEGIN
+ version_added: '2.5'
+ marker_end:
+ required: false
+ description:
+ - This will be inserted at C({mark}) in the closing ansible block marker.
+ type: str
+ default: END
+ version_added: '2.5'
+notes:
+ - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
+  - When more than one block should be handled in one file, you must change the I(marker) per task.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - files
+ - validate
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ safe_file_operations:
+ support: full
+ platform:
+ support: full
+ platforms: posix
+ vault:
+ support: none
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' or 'name' was used instead of 'path'
+- name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config
+ ansible.builtin.blockinfile:
+ path: /etc/ssh/sshd_config
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+
+- name: Insert/Update eth0 configuration stanza in /etc/network/interfaces
+        (it might be better to copy files into /etc/network/interfaces.d/)
+ ansible.builtin.blockinfile:
+ path: /etc/network/interfaces
+ block: |
+ iface eth0 inet static
+ address 192.0.2.23
+ netmask 255.255.255.0
+
+- name: Insert/Update configuration using a local file and validate it
+ ansible.builtin.blockinfile:
+ block: "{{ lookup('ansible.builtin.file', './local/sshd_config') }}"
+ path: /etc/ssh/sshd_config
+ backup: yes
+ validate: /usr/sbin/sshd -T -f %s
+
+- name: Insert/Update HTML surrounded by custom markers after <body> line
+ ansible.builtin.blockinfile:
+ path: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ insertafter: "<body>"
+ block: |
+ <h1>Welcome to {{ ansible_hostname }}</h1>
+ <p>Last updated on {{ ansible_date_time.iso8601 }}</p>
+
+- name: Remove HTML as well as surrounding markers
+ ansible.builtin.blockinfile:
+ path: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ block: ""
+
+- name: Add mappings to /etc/hosts
+ ansible.builtin.blockinfile:
+ path: /etc/hosts
+ block: |
+ {{ item.ip }} {{ item.name }}
+ marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
+ loop:
+ - { name: host1, ip: 10.10.1.10 }
+ - { name: host2, ip: 10.10.1.11 }
+ - { name: host3, ip: 10.10.1.12 }
+
+- name: Search with a multiline search flags regex and if found insert after
+ blockinfile:
+ path: listener.ora
+ block: "{{ listener_line | indent(width=8, first=True) }}"
+ insertafter: '(?m)SID_LIST_LISTENER_DG =\n.*\(SID_LIST ='
+ marker: " <!-- {mark} ANSIBLE MANAGED BLOCK -->"
+
+'''
+
+import re
+import os
+import tempfile
+from ansible.module_utils.six import b
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def write_changes(module, contents, path):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
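+    # 'validate' is a command template such as '/usr/sbin/sshd -T -f %s'; the
+    # %s placeholder receives the temp file path and the new content is only
+    # moved into place when the validator exits 0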
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message, diff):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'),
+ block=dict(type='str', default='', aliases=['content']),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ create=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ marker_begin=dict(type='str', default='BEGIN'),
+ marker_end=dict(type='str', default='END'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ params = module.params
+ path = params['path']
+
+ if os.path.isdir(path):
+ module.fail_json(rc=256,
+                         msg='Path %s is a directory!' % path)
+
+ path_exists = os.path.exists(path)
+ if not path_exists:
+ if not module.boolean(params['create']):
+ module.fail_json(rc=257,
+                             msg='Path %s does not exist!' % path)
+ destpath = os.path.dirname(path)
+ if not os.path.exists(destpath) and not module.check_mode:
+ try:
+ os.makedirs(destpath)
+ except Exception as e:
+                module.fail_json(msg='Error creating %s: %s' % (destpath, to_native(e)))
+ original = None
+ lines = []
+ else:
+ with open(path, 'rb') as f:
+ original = f.read()
+ lines = original.splitlines(True)
+
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % path,
+ 'after_header': '%s (content)' % path}
+
+ if module._diff and original:
+ diff['before'] = original
+
+ insertbefore = params['insertbefore']
+ insertafter = params['insertafter']
+ block = to_bytes(params['block'])
+ marker = to_bytes(params['marker'])
+ present = params['state'] == 'present'
+
+ if not present and not path_exists:
+ module.exit_json(changed=False, msg="File %s not present" % path)
+
+ if insertbefore is None and insertafter is None:
+ insertafter = 'EOF'
+
+ if insertafter not in (None, 'EOF'):
+ insertre = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
+ elif insertbefore not in (None, 'BOF'):
+ insertre = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
+ else:
+ insertre = None
+
+ marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker) + b(os.linesep)
+ marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker) + b(os.linesep)
+ if present and block:
+ if not block.endswith(b(os.linesep)):
+ block += b(os.linesep)
+ blocklines = [marker0] + block.splitlines(True) + [marker1]
+ else:
+ blocklines = []
+
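+    # locate any existing begin/end marker lines: when both are found, the
+    # lines between them (markers included) are replaced in place below;
+    # otherwise the insertion point is derived from insertafter/insertbefore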
+ n0 = n1 = None
+ for i, line in enumerate(lines):
+ if line == marker0:
+ n0 = i
+ if line == marker1:
+ n1 = i
+
+ if None in (n0, n1):
+ n0 = None
+ if insertre is not None:
+ if insertre.flags & re.MULTILINE:
+ match = insertre.search(original)
+ if match:
+ if insertafter:
+ n0 = to_native(original).count('\n', 0, match.end())
+ elif insertbefore:
+ n0 = to_native(original).count('\n', 0, match.start())
+ else:
+ for i, line in enumerate(lines):
+ if insertre.search(line):
+ n0 = i
+ if n0 is None:
+ n0 = len(lines)
+ elif insertafter is not None:
+ n0 += 1
+ elif insertbefore is not None:
+ n0 = 0 # insertbefore=BOF
+ else:
+ n0 = len(lines) # insertafter=EOF
+ elif n0 < n1:
+ lines[n0:n1 + 1] = []
+ else:
+ lines[n1:n0 + 1] = []
+ n0 = n1
+
+ # Ensure there is a line separator before the block of lines to be inserted
+ if n0 > 0:
+ if not lines[n0 - 1].endswith(b(os.linesep)):
+ lines[n0 - 1] += b(os.linesep)
+
+ lines[n0:n0] = blocklines
+ if lines:
+ result = b''.join(lines)
+ else:
+ result = b''
+
+ if module._diff:
+ diff['after'] = result
+
+ if original == result:
+ msg = ''
+ changed = False
+ elif original is None:
+ msg = 'File created'
+ changed = True
+ elif not blocklines:
+ msg = 'Block removed'
+ changed = True
+ else:
+ msg = 'Block inserted'
+ changed = True
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if module.boolean(params['backup']) and path_exists:
+ backup_file = module.backup_local(path)
+ # We should always follow symlinks so that we change the real file
+ real_path = os.path.realpath(params['path'])
+ write_changes(module, result, real_path)
+
+ if module.check_mode and not path_exists:
+ module.exit_json(changed=changed, msg=msg, diff=diff)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % path
+ attr_diff['after_header'] = '%s (file attributes)' % path
+
+ difflist = [diff, attr_diff]
+
+ if backup_file is None:
+ module.exit_json(changed=changed, msg=msg, diff=difflist)
+ else:
+ module.exit_json(changed=changed, msg=msg, diff=difflist, backup_file=backup_file)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
new file mode 100644
index 0000000..490c0ca
--- /dev/null
+++ b/lib/ansible/modules/command.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: command
+short_description: Execute commands on targets
+version_added: historical
+description:
+ - The C(command) module takes the command name followed by a list of space-delimited arguments.
+ - The given command will be executed on all selected nodes.
+ - The command(s) will not be
+ processed through the shell, so variables like C($HOSTNAME) and operations
+ like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
+ Use the M(ansible.builtin.shell) module if you need these features.
+ - To create C(command) tasks that are easier to read than the ones using space-delimited
+ arguments, pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task)
+ or use C(cmd) parameter.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - For Windows targets, use the M(ansible.windows.win_command) module instead.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.raw
+attributes:
+ check_mode:
+    details: while the command itself is arbitrary and cannot be subject to check mode semantics, it adds C(creates)/C(removes) options as a workaround
+ support: partial
+ diff_mode:
+ support: none
+ platform:
+ support: full
+ platforms: posix
+ raw:
+ support: full
+options:
+ free_form:
+ description:
+ - The command module takes a free form string as a command to run.
+ - There is no actual parameter named 'free form'.
+ cmd:
+ type: str
+ description:
+ - The command to run.
+ argv:
+ type: list
+ elements: str
+ description:
+ - Passes the command as a list rather than a string.
+ - Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
+ - Only the string (free form) or the list (argv) form can be provided, not both. One or the other must be provided.
+ version_added: "2.6"
+ creates:
+ type: path
+ description:
+ - A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(will not) be run.
+ - This is checked before I(removes) is checked.
+ removes:
+ type: path
+ description:
+ - A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
+ - This is checked after I(creates) is checked.
+ version_added: "0.8"
+ chdir:
+ type: path
+ description:
+ - Change into this directory before running the command.
+ version_added: "0.6"
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: "2.4"
+ stdin_add_newline:
+ type: bool
+ default: yes
+ description:
+ - If set to C(true), append a newline to stdin data.
+ version_added: "2.8"
+ strip_empty_ends:
+ description:
+ - Strip empty lines from the end of stdout/stderr in result.
+ version_added: "2.8"
+ type: bool
+ default: yes
+notes:
+ - If you want to run a command through the shell (say you are using C(<), C(>), C(|), and so on),
+ you actually want the M(ansible.builtin.shell) module instead.
+ Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly so it is more secure to
+ use the C(command) module when possible.
+ - C(creates), C(removes), and C(chdir) can be specified after the command.
+ For instance, if you only want to run a command if a certain file does not exist, use this.
+ - Check mode is supported when passing C(creates) or C(removes). If running in check mode and either of these are specified, the module will
+ check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
+ - The C(executable) parameter is removed since version 2.4. If you have a need for this parameter, use the M(ansible.builtin.shell) module instead.
+ - For Windows targets, use the M(ansible.windows.win_command) module instead.
+ - For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
+seealso:
+- module: ansible.builtin.raw
+- module: ansible.builtin.script
+- module: ansible.builtin.shell
+- module: ansible.windows.win_command
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Return motd to registered var
+ ansible.builtin.command: cat /etc/motd
+ register: mymotd
+
+# free-form (string) arguments, all arguments on one line
+- name: Run command if /path/to/database does not exist (without 'args')
+ ansible.builtin.command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
+
+# free-form (string) arguments, some arguments on separate lines with the 'args' keyword
+# 'args' is a task keyword, passed at the same level as the module
+- name: Run command if /path/to/database does not exist (with 'args' keyword)
+ ansible.builtin.command: /usr/bin/make_database.sh db_user db_name
+ args:
+ creates: /path/to/database
+
+# 'cmd' is module parameter
+- name: Run command if /path/to/database does not exist (with 'cmd' parameter)
+ ansible.builtin.command:
+ cmd: /usr/bin/make_database.sh db_user db_name
+ creates: /path/to/database
+
+- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist
+ ansible.builtin.command: /usr/bin/make_database.sh db_user db_name
+ become: yes
+ become_user: db_owner
+ args:
+ chdir: somedir/
+ creates: /path/to/database
+
+# argv (list) arguments, each argument on a separate line, 'args' keyword not necessary
+# 'argv' is a parameter, indented one level from the module
+- name: Use 'argv' to send a command as a list - leave 'command' empty
+ ansible.builtin.command:
+ argv:
+ - /usr/bin/make_database.sh
+ - Username with whitespace
+ - dbname with whitespace
+ creates: /path/to/database
+
+- name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues
+ ansible.builtin.command: cat {{ myfile|quote }}
+ register: myoutput
+'''
+
+RETURN = r'''
+msg:
+ description: changed
+ returned: always
+ type: bool
+ sample: True
+start:
+ description: The command execution start time.
+ returned: always
+ type: str
+ sample: '2017-09-29 22:03:48.083128'
+end:
+ description: The command execution end time.
+ returned: always
+ type: str
+ sample: '2017-09-29 22:03:48.084657'
+delta:
+ description: The command execution delta time.
+ returned: always
+ type: str
+ sample: '0:00:00.001529'
+stdout:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master …'
+stderr:
+ description: The command standard error.
+ returned: always
+ type: str
+ sample: 'ls cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task.
+ returned: always
+ type: list
+ sample:
+ - echo
+ - hello
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines.
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
+stderr_lines:
+ description: The command standard error split in lines.
+ returned: always
+ type: list
+ sample: [u'ls cannot access foo: No such file or directory', u'ls …']
+'''
+
+import datetime
+import glob
+import os
+import shlex
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native, to_bytes, to_text
+from ansible.module_utils.common.collections import is_iterable
+
+
+def main():
+
+ # the command module is the one ansible module that does not take key=value args
+ # hence don't copy this one if you are looking to build others!
+ # NOTE: ensure splitter.py is kept in sync for exceptions
+ module = AnsibleModule(
+ argument_spec=dict(
+ _raw_params=dict(),
+ _uses_shell=dict(type='bool', default=False),
+ argv=dict(type='list', elements='str'),
+ chdir=dict(type='path'),
+ executable=dict(),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ # The default for this really comes from the action plugin
+ stdin=dict(required=False),
+ stdin_add_newline=dict(type='bool', default=True),
+ strip_empty_ends=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ shell = module.params['_uses_shell']
+ chdir = module.params['chdir']
+ executable = module.params['executable']
+ args = module.params['_raw_params']
+ argv = module.params['argv']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ stdin = module.params['stdin']
+ stdin_add_newline = module.params['stdin_add_newline']
+ strip = module.params['strip_empty_ends']
+
+    # we promised these in 'always' (the _lines variants get auto-added by the action plugin)
+ r = {'changed': False, 'stdout': '', 'stderr': '', 'rc': None, 'cmd': None, 'start': None, 'end': None, 'delta': None, 'msg': ''}
+
+ if not shell and executable:
+ module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
+ executable = None
+
+ if (not args or args.strip() == '') and not argv:
+ r['rc'] = 256
+ r['msg'] = "no command given"
+ module.fail_json(**r)
+
+ if args and argv:
+ r['rc'] = 256
+ r['msg'] = "only command or argv can be given, not both"
+ module.fail_json(**r)
+
+ if not shell and args:
+ args = shlex.split(args)
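+        # e.g. shlex.split('cat "a file.txt"') -> ['cat', 'a file.txt']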
+
+ args = args or argv
+ # All args must be strings
+ if is_iterable(args, include_strings=False):
+ args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
+
+ r['cmd'] = args
+
+ if chdir:
+ chdir = to_bytes(chdir, errors='surrogate_or_strict')
+
+ try:
+ os.chdir(chdir)
+ except (IOError, OSError) as e:
+ r['msg'] = 'Unable to change directory before execution: %s' % to_text(e)
+ module.fail_json(**r)
+
+ # check_mode partial support, since it only really works in checking creates/removes
+ if module.check_mode:
+ shoulda = "Would"
+ else:
+ shoulda = "Did"
+
+ # special skips for idempotence if file exists (assumes command creates)
+ if creates:
+ if glob.glob(creates):
+ r['msg'] = "%s not run command since '%s' exists" % (shoulda, creates)
+ r['stdout'] = "skipped, since %s exists" % creates # TODO: deprecate
+
+ r['rc'] = 0
+
+ # special skips for idempotence if file does not exist (assumes command removes)
+ if not r['msg'] and removes:
+ if not glob.glob(removes):
+ r['msg'] = "%s not run command since '%s' does not exist" % (shoulda, removes)
+ r['stdout'] = "skipped, since %s does not exist" % removes # TODO: deprecate
+ r['rc'] = 0
+
+ if r['msg']:
+ module.exit_json(**r)
+
+ r['changed'] = True
+
+ # actually executes command (or not ...)
+ if not module.check_mode:
+ r['start'] = datetime.datetime.now()
+ r['rc'], r['stdout'], r['stderr'] = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None,
+ data=stdin, binary_data=(not stdin_add_newline))
+ r['end'] = datetime.datetime.now()
+ else:
+ # this is partial check_mode support, since we end up skipping if we get here
+ r['rc'] = 0
+ r['msg'] = "Command would have run if not in check mode"
+ if creates is None and removes is None:
+ r['skipped'] = True
+ # skipped=True and changed=True are mutually exclusive
+ r['changed'] = False
+
+ # convert to text for jsonization and usability
+ if r['start'] is not None and r['end'] is not None:
+ # these are datetime objects, but need them as strings to pass back
+ r['delta'] = to_text(r['end'] - r['start'])
+ r['end'] = to_text(r['end'])
+ r['start'] = to_text(r['start'])
+
+ if strip:
+ r['stdout'] = to_text(r['stdout']).rstrip("\r\n")
+ r['stderr'] = to_text(r['stderr']).rstrip("\r\n")
+
+ if r['rc'] != 0:
+ r['msg'] = 'non-zero return code'
+ module.fail_json(**r)
+
+ module.exit_json(**r)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
new file mode 100644
index 0000000..37115fa
--- /dev/null
+++ b/lib/ansible/modules/copy.py
@@ -0,0 +1,825 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: copy
+version_added: historical
+short_description: Copy files to remote locations
+description:
+ - The C(copy) module copies a file from the local or remote machine to a location on the remote machine.
+ - Use the M(ansible.builtin.fetch) module to copy files from remote locations to the local box.
+ - If you need variable interpolation in copied files, use the M(ansible.builtin.template) module.
+ Using a variable in the C(content) field will result in unpredictable output.
+ - For Windows targets, use the M(ansible.windows.win_copy) module instead.
+options:
+ src:
+ description:
+ - Local path to a file to copy to the remote server.
+ - This can be absolute or relative.
+ - If path is a directory, it is copied recursively. In this case, if path ends
+ with "/", only inside contents of that directory are copied to destination.
+ Otherwise, if it does not end with "/", the directory itself with all contents
+ is copied. This behavior is similar to the C(rsync) command line tool.
+ type: path
+ content:
+ description:
+ - When used instead of C(src), sets the contents of a file directly to the specified value.
+ - Works only when C(dest) is a file. Creates the file if it does not exist.
+ - For advanced formatting or if C(content) contains a variable, use the
+ M(ansible.builtin.template) module.
+ type: str
+ version_added: '1.1'
+ dest:
+ description:
+ - Remote absolute path where the file should be copied to.
+ - If C(src) is a directory, this must be a directory too.
+ - If C(dest) is a non-existent path and if either C(dest) ends with "/" or C(src) is a directory, C(dest) is created.
+ - If I(dest) is a relative path, the starting directory is determined by the remote host.
+ - If C(src) and C(dest) are files, the parent directory of C(dest) is not created and the task fails if it does not already exist.
+ type: path
+ required: yes
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '0.7'
+ force:
+ description:
+ - Influence whether the remote file must always be replaced.
+ - If C(true), the remote file will be replaced when contents are different than the source.
+ - If C(false), the file will only be transferred if the destination does not exist.
+ type: bool
+ default: yes
+ version_added: '1.1'
+ mode:
+ description:
+ - The permissions of the destination file or directory.
+ - For those used to C(/usr/bin/chmod) remember that modes are actually octal numbers.
+ You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
+ (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives a string
+ and can do its own conversion from string into number. Giving Ansible a number without following
+ one of these rules will end up with a decimal number which will have unexpected results.
+ - As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
+ - As of Ansible 2.3, the mode may also be the special string C(preserve).
+ - C(preserve) means that the file will be given the same permissions as the source file.
+ - When doing a recursive copy, see also C(directory_mode).
+ - If C(mode) is not specified and the destination file B(does not) exist, the default C(umask) on the system will be used
+ when setting the mode for the newly created file.
+ - If C(mode) is not specified and the destination file B(does) exist, the mode of the existing file will be used.
+ - Specifying C(mode) is the best way to ensure files are created with the correct permissions.
+ See CVE-2020-1736 for further details.
+ directory_mode:
+ description:
+ - When doing a recursive copy set the mode for the directories.
+ - If this is not set we will use the system defaults.
+ - The mode is only set on directories which are newly created, and will not affect those that already existed.
+ type: raw
+ version_added: '1.5'
+ remote_src:
+ description:
+ - Influence whether C(src) needs to be transferred or already is present remotely.
+ - If C(false), it will search for C(src) on the controller node.
+ - If C(true) it will search for C(src) on the managed (remote) node.
+ - C(remote_src) supports recursive copying as of version 2.8.
+ - C(remote_src) only works with C(mode=preserve) as of version 2.6.
+ - Autodecryption of files does not work when C(remote_src=yes).
+ type: bool
+ default: no
+ version_added: '2.0'
+ follow:
+ description:
+ - This flag indicates that filesystem links in the destination, if they exist, should be followed.
+ type: bool
+ default: no
+ version_added: '1.8'
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree, if they exist, should be followed.
+ type: bool
+ default: yes
+ version_added: '2.4'
+ checksum:
+ description:
+ - SHA1 checksum of the file being transferred.
+ - Used to validate that the copy of the file was successful.
+ - If this is not provided, ansible will use the local calculated checksum of the src file.
+ type: str
+ version_added: '2.5'
+extends_documentation_fragment:
+ - decrypt
+ - files
+ - validate
+ - action_common_attributes
+ - action_common_attributes.files
+ - action_common_attributes.flow
+notes:
+  - The M(ansible.builtin.copy) module's recursive copy facility does not scale to lots (>hundreds) of files.
+seealso:
+ - module: ansible.builtin.assemble
+ - module: ansible.builtin.fetch
+ - module: ansible.builtin.file
+ - module: ansible.builtin.template
+ - module: ansible.posix.synchronize
+ - module: ansible.windows.win_copy
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: full
+ version_added: '2.2'
+'''
+
+EXAMPLES = r'''
+- name: Copy file with owner and permissions
+ ansible.builtin.copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: '0644'
+
+- name: Copy file with owner and permission, using symbolic representation
+ ansible.builtin.copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: u=rw,g=r,o=r
+
+- name: Another symbolic mode example, adding some permissions and removing others
+ ansible.builtin.copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: u+rw,g-wx,o-rwx
+
+- name: Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version
+ ansible.builtin.copy:
+ src: /mine/ntp.conf
+ dest: /etc/ntp.conf
+ owner: root
+ group: root
+ mode: '0644'
+ backup: yes
+
+- name: Copy a new "sudoers" file into place, after passing validation with visudo
+ ansible.builtin.copy:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: /usr/sbin/visudo -csf %s
+
+- name: Copy a "sudoers" file on the remote machine for editing
+ ansible.builtin.copy:
+ src: /etc/sudoers
+ dest: /etc/sudoers.edit
+ remote_src: yes
+ validate: /usr/sbin/visudo -csf %s
+
+- name: Copy using inline content
+ ansible.builtin.copy:
+ content: '# This file was moved to /etc/other.conf'
+ dest: /etc/mine.conf
+
+- name: If follow=yes, /path/to/file will be overwritten by contents of foo.conf
+ ansible.builtin.copy:
+ src: /etc/foo.conf
+ dest: /path/to/link # link to /path/to/file
+ follow: yes
+
+- name: If follow=no, /path/to/link will become a file and be overwritten by contents of foo.conf
+ ansible.builtin.copy:
+ src: /etc/foo.conf
+ dest: /path/to/link # link to /path/to/file
+ follow: no
+'''
+
+RETURN = r'''
+dest:
+ description: Destination file/path.
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+src:
+ description: Source file used for the copy on the target machine.
+ returned: changed
+ type: str
+ sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
+md5sum:
+ description: MD5 checksum of the file after running copy.
+ returned: when supported
+ type: str
+ sample: 2a5aeecc61dc98c4d780b14b330e3282
+checksum:
+ description: SHA1 checksum of the file after running copy.
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if backup=yes
+ type: str
+ sample: /path/to/file.txt.2015-02-12@22:09~
+gid:
+ description: Group id of the file, after execution.
+ returned: success
+ type: int
+ sample: 100
+group:
+ description: Group of the file, after execution.
+ returned: success
+ type: str
+ sample: httpd
+owner:
+ description: Owner of the file, after execution.
+ returned: success
+ type: str
+ sample: httpd
+uid:
+ description: Owner id of the file, after execution.
+ returned: success
+ type: int
+ sample: 100
+mode:
+ description: Permissions of the target, after execution.
+ returned: success
+ type: str
+ sample: "0644"
+size:
+ description: Size of the target, after execution.
+ returned: success
+ type: int
+ sample: 1220
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: file
+'''
+
+import errno
+import filecmp
+import grp
+import os
+import os.path
+import platform
+import pwd
+import shutil
+import stat
+import tempfile
+import traceback
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.six import PY3
+
+
+# The AnsibleModule object
+module = None
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+
+# Once we get run_command moved into common, we can move this into a common/files module. We can't
+# until then because of the module.run_command() method. We may need to move it into
+# basic::AnsibleModule() until then but if so, make it a private function so that we don't have to
+# keep it for backwards compatibility later.
+def clear_facls(path):
+ setfacl = get_bin_path('setfacl')
+ # FIXME "setfacl -b" is available on Linux and FreeBSD. There is "setfacl -D e" on z/OS. Others?
+ acl_command = [setfacl, '-b', path]
+ b_acl_command = [to_bytes(x) for x in acl_command]
+ locale = get_best_parsable_locale(module)
+ rc, out, err = module.run_command(b_acl_command, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale))
+ if rc != 0:
+ raise RuntimeError('Error running "{0}": stdout: "{1}"; stderr: "{2}"'.format(' '.join(b_acl_command), out, err))
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
+ '''
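+    # e.g. for dirname='/a/b/c' where only '/a' exists, this returns ('/a', ['b', 'c'])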
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if head == '':
+ return ('.', [tail])
+ if not os.path.exists(b_head):
+ if head == '/':
+ raise AnsibleModuleError(results={'msg': "The '/' directory doesn't exist on this machine."})
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return (head, [tail])
+ new_directory_list.append(tail)
+ return (pre_existing_dir, new_directory_list)
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+
+ if new_directory_list:
+ working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+def chown_recursive(path, module):
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+
+ if owner is not None:
+ if not module.check_mode:
+ for dirpath, dirnames, filenames in os.walk(path):
+ owner_changed = module.set_owner_if_different(dirpath, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ owner_changed = module.set_owner_if_different(dir, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ owner_changed = module.set_owner_if_different(file, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ else:
+ uid = pwd.getpwnam(owner).pw_uid
+ for dirpath, dirnames, filenames in os.walk(path):
+ owner_changed = (os.stat(dirpath).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ owner_changed = (os.stat(dir).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ owner_changed = (os.stat(file).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ if group is not None:
+ if not module.check_mode:
+ for dirpath, dirnames, filenames in os.walk(path):
+ group_changed = module.set_group_if_different(dirpath, group, False)
+ if group_changed is True:
+ changed = group_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ group_changed = module.set_group_if_different(dir, group, False)
+ if group_changed is True:
+ changed = group_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ group_changed = module.set_group_if_different(file, group, False)
+ if group_changed is True:
+ changed = group_changed
+ else:
+ gid = grp.getgrnam(group).gr_gid
+ for dirpath, dirnames, filenames in os.walk(path):
+ group_changed = (os.stat(dirpath).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ group_changed = (os.stat(dir).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ group_changed = (os.stat(file).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+
+ return changed
+
+
+def copy_diff_files(src, dest, module):
+ """Copy files that are different between `src` directory and `dest` directory."""
+
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+ local_follow = module.params['local_follow']
+ diff_files = filecmp.dircmp(src, dest).diff_files
+ if len(diff_files):
+ changed = True
+ if not module.check_mode:
+ for item in diff_files:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+ if os.path.islink(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+ else:
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ shutil.copymode(b_src_item_path, b_dest_item_path)
+
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+ changed = True
+ return changed
+
+
+def copy_left_only(src, dest, module):
+ """Copy files that exist in `src` directory only to the `dest` directory."""
+
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+ local_follow = module.params['local_follow']
+ left_only = filecmp.dircmp(src, dest).left_only
+ if len(left_only):
+ changed = True
+ if not module.check_mode:
+ for item in left_only:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+
+ if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is True:
+ shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
+ chown_recursive(b_dest_item_path, module)
+
+ if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+
+ if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is True:
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+
+ if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+
+ if not os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path):
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ shutil.copymode(b_src_item_path, b_dest_item_path)
+
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+
+ if not os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path):
+ shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
+ chown_recursive(b_dest_item_path, module)
+
+ changed = True
+ return changed
+
+
+def copy_common_dirs(src, dest, module):
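+ """Handle directories common to `src` and `dest`: sync differing files, copy missing ones, and recurse into each."""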
+ changed = False
+ common_dirs = filecmp.dircmp(src, dest).common_dirs
+ for item in common_dirs:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+ diff_files_changed = copy_diff_files(b_src_item_path, b_dest_item_path, module)
+ left_only_changed = copy_left_only(b_src_item_path, b_dest_item_path, module)
+ if diff_files_changed or left_only_changed:
+ changed = True
+
+ # recurse into subdirectory; call before the "or" so short-circuit
+ # evaluation cannot skip the recursion once changed is already True
+ changed = copy_common_dirs(os.path.join(src, item), os.path.join(dest, item), module) or changed
+ return changed
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path'),
+ _original_basename=dict(type='str'), # used to handle 'dest is a directory' via template, a slight hack
+ content=dict(type='str', no_log=True),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ force=dict(type='bool', default=True),
+ validate=dict(type='str'),
+ directory_mode=dict(type='raw'),
+ remote_src=dict(type='bool'),
+ local_follow=dict(type='bool'),
+ checksum=dict(type='str'),
+ follow=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ src = module.params['src']
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ dest = module.params['dest']
+ # Make sure we always have a directory component for later processing
+ if os.path.sep not in dest:
+ dest = '.{0}{1}'.format(os.path.sep, dest)
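+ # e.g. a bare 'foo.conf' becomes './foo.conf', so os.path.dirname() later always returns a usable directory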
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ backup = module.params['backup']
+ force = module.params['force']
+ _original_basename = module.params.get('_original_basename', None)
+ validate = module.params.get('validate', None)
+ follow = module.params['follow']
+ local_follow = module.params['local_follow']
+ mode = module.params['mode']
+ owner = module.params['owner']
+ group = module.params['group']
+ remote_src = module.params['remote_src']
+ checksum = module.params['checksum']
+
+ if not os.path.exists(b_src):
+ module.fail_json(msg="Source %s not found" % (src))
+ if not os.access(b_src, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % (src))
+
+ # Preserve is usually handled in the action plugin but mode + remote_src has to be done on the
+ # remote host
+ if module.params['mode'] == 'preserve':
+ module.params['mode'] = '0%03o' % stat.S_IMODE(os.stat(b_src).st_mode)
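+ # stat.S_IMODE() strips the file type bits, so e.g. a rwxr-xr-x source yields the string '0755'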
+ mode = module.params['mode']
+
+ changed = False
+
+ checksum_dest = None
+ checksum_src = None
+ md5sum_src = None
+
+ if os.path.isfile(src):
+ try:
+ checksum_src = module.sha1(src)
+ except (OSError, IOError) as e:
+ module.warn("Unable to calculate src checksum, assuming change: %s" % to_native(e))
+ try:
+ # Backwards compat only. This will be None in FIPS mode
+ md5sum_src = module.md5(src)
+ except ValueError:
+ pass
+ elif remote_src and not os.path.isdir(src):
+ module.fail_json("Cannot copy invalid source '%s': not a file" % to_native(src))
+
+ if checksum and checksum_src != checksum:
+ module.fail_json(
+ msg='Copied file does not match the expected checksum. Transfer failed.',
+ checksum=checksum_src,
+ expected_checksum=checksum
+ )
+
+ # Special handling for recursive copy - create intermediate dirs
+ if dest.endswith(os.sep):
+ if _original_basename:
+ dest = os.path.join(dest, _original_basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ dirname = os.path.dirname(dest)
+ b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
+ if not os.path.exists(b_dirname):
+ try:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
+ except AnsibleModuleError as e:
+ e.results['msg'] += ' Could not copy to {0}'.format(dest)
+ module.fail_json(**e.results)
+
+ os.makedirs(b_dirname)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ basename = os.path.basename(src)
+ if _original_basename:
+ basename = _original_basename
+ dest = os.path.join(dest, basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.exists(b_dest):
+ if os.path.islink(b_dest) and follow:
+ b_dest = os.path.realpath(b_dest)
+ dest = to_native(b_dest, errors='surrogate_or_strict')
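+ # with follow=yes the module operates on the symlink's target rather than the link itself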
+ if not force:
+ module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
+ if os.access(b_dest, os.R_OK) and os.path.isfile(b_dest):
+ checksum_dest = module.sha1(dest)
+ else:
+ if not os.path.exists(os.path.dirname(b_dest)):
+ try:
+ # os.path.exists() can return false in some
+ # circumstances where the directory does not have
+ # the execute bit for the current user set, in
+ # which case the stat() call will raise an OSError
+ os.stat(os.path.dirname(b_dest))
+ except OSError as e:
+ if "permission denied" in to_native(e).lower():
+ module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
+ module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
+
+ if not os.access(os.path.dirname(b_dest), os.W_OK) and not module.params['unsafe_writes']:
+ module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
+
+ backup_file = None
+ if checksum_src != checksum_dest or os.path.islink(b_dest):
+
+ if not module.check_mode:
+ try:
+ if backup:
+ if os.path.exists(b_dest):
+ backup_file = module.backup_local(dest)
+ # allow for conversion from symlink.
+ if os.path.islink(b_dest):
+ os.unlink(b_dest)
+ open(b_dest, 'w').close()
+ if validate:
+ # if we have a mode, make sure we set it on the temporary
+ # file source as some validations may require it
+ if mode is not None:
+ module.set_mode_if_different(src, mode, False)
+ if owner is not None:
+ module.set_owner_if_different(src, owner, False)
+ if group is not None:
+ module.set_group_if_different(src, group, False)
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % src)
+ if rc != 0:
+ module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
+
+ b_mysrc = b_src
+ if remote_src and os.path.isfile(b_src):
+
+ fd, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
+ # close the descriptor returned by mkstemp; only the path is reused by shutil.copyfile below
+ os.close(fd)
+
+ shutil.copyfile(b_src, b_mysrc)
+ try:
+ shutil.copystat(b_src, b_mysrc)
+ except OSError as err:
+ if err.errno == errno.ENOSYS and mode == "preserve":
+ module.warn("Unable to copy stats {0}".format(to_native(b_src)))
+ else:
+ raise
+
+ # might be needed below
+ if PY3 and hasattr(os, 'listxattr'):
+ try:
+ src_has_acls = 'system.posix_acl_access' in os.listxattr(src)
+ except Exception:
+ # assume unwanted ACLs by default
+ src_has_acls = True
+
+ # at this point we should always have tmp file
+ module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
+
+ if PY3 and hasattr(os, 'listxattr') and platform.system() == 'Linux' and not remote_src:
+ # atomic_move used above to copy src into dest might, in some cases,
+ # use shutil.copy2 which in turn uses shutil.copystat.
+ # Since Python 3.3, shutil.copystat copies file extended attributes:
+ # https://docs.python.org/3/library/shutil.html#shutil.copystat
+ # os.listxattr (along with others) was added to handle the operation.
+
+ # This means that on Python 3 we are copying the extended attributes which includes
+ # the ACLs on some systems - further limited to Linux as the documentation above claims
+ # that the extended attributes are copied only on Linux. Also, os.listxattr is only
+ # available on Linux.
+
+ # If not remote_src, then the file was copied from the controller. In that
+ # case, any filesystem ACLs are artifacts of the copy rather than preservation
+ # of existing attributes. Get rid of them:
+
+ if src_has_acls:
+ # FIXME If dest has any default ACLs, they are not applied to src now because
+ # they were overridden by copystat. Should/can we do anything about this?
+ # 'system.posix_acl_default' in os.listxattr(os.path.dirname(b_dest))
+
+ try:
+ clear_facls(dest)
+ except ValueError as e:
+ if 'setfacl' in to_native(e):
+ # No setfacl so we're okay. The controller couldn't have set a facl
+ # without the setfacl command
+ pass
+ else:
+ raise
+ except RuntimeError as e:
+ # setfacl failed.
+ if 'Operation not supported' in to_native(e):
+ # The file system does not support ACLs.
+ pass
+ else:
+ raise
+
+ except (IOError, OSError):
+ module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
+ changed = True
+ else:
+ changed = False
+
+ # If neither has a checksum, src is a directory and dest is a directory or does not yet exist.
+ if checksum_src is None and checksum_dest is None:
+ if remote_src and os.path.isdir(module.params['src']):
+ b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
+ b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
+
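+ # the branches below follow the documented copy semantics: a trailing "/" on src
+ # copies the directory's contents, while no trailing "/" copies the directory itself into dest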
+ if src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+
+ if src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ if not module.check_mode:
+ shutil.copytree(b_src, b_dest, symlinks=not local_follow)
+ chown_recursive(dest, module)
+ changed = True
+
+ if not src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ if not module.check_mode and not os.path.exists(b_dest):
+ shutil.copytree(b_src, b_dest, symlinks=not local_follow)
+ changed = True
+ chown_recursive(dest, module)
+ if module.check_mode and not os.path.exists(b_dest):
+ changed = True
+ if os.path.exists(b_dest):
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+
+ if not src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(module.params['src']), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ if not module.check_mode and not os.path.exists(b_dest):
+ os.makedirs(b_dest)
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+ if module.check_mode and not os.path.exists(b_dest):
+ changed = True
+
+ res_args = dict(
+ dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
+ )
+ if backup_file:
+ res_args['backup_file'] = backup_file
+
+ if not module.check_mode:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
new file mode 100644
index 0000000..9b4c96c
--- /dev/null
+++ b/lib/ansible/modules/cron.py
@@ -0,0 +1,765 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
+# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
+# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
+# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: cron
+short_description: Manage cron.d and crontab entries
+description:
+ - Use this module to manage crontab and environment variable entries. This module allows
+ you to create, update, or delete environment variables and named crontab entries.
+ - 'When crontab jobs are managed: the module includes one line with the description of the
+ crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
+ which is used by future ansible/module calls to find/check the state. The "name"
+ parameter should be unique, and changing the "name" value will result in a new cron
+ task being created (or a different one being removed).'
+ - When environment variables are managed, no comment line is added, but, when the module
+ needs to find/check the state, it uses the "name" parameter to find the environment
+ variable definition line.
+ - When using symbols such as %, they must be properly escaped.
+version_added: "0.9"
+options:
+ name:
+ description:
+ - Description of a crontab entry or, if env is set, the name of environment variable.
+ - This parameter is always required as of ansible-core 2.12.
+ type: str
+ required: yes
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - When unset, this parameter defaults to the current user.
+ type: str
+ job:
+ description:
+ - The command to execute or, if env is set, the value of environment variable.
+ - The command should not contain line breaks.
+ - Required if I(state=present).
+ type: str
+ aliases: [ value ]
+ state:
+ description:
+ - Whether to ensure the job or environment variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ The assumption is that this file is exclusively managed by the module,
+ do not use if the file contains multiple entries, NEVER use for /etc/crontab.
+ - If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
+ - Many Linux distros expect (and some require) the filename portion to consist solely
+ of upper- and lower-case letters, digits, underscores, and hyphens.
+ - Using this parameter requires you to specify I(user) as well, unless I(state) is not I(present).
+ - Either this parameter or I(name) is required.
+ type: path
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup_file) variable by this module.
+ type: bool
+ default: no
+ minute:
+ description:
+ - Minute when the job should run (C(0-59), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ hour:
+ description:
+ - Hour when the job should run (C(0-23), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ day:
+ description:
+ - Day of the month the job should run (C(1-31), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ aliases: [ dom ]
+ month:
+ description:
+ - Month of the year the job should run (C(1-12), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ weekday:
+ description:
+ - Day of the week that the job should run (C(0-6) for Sunday-Saturday, C(*), and so on).
+ type: str
+ default: "*"
+ aliases: [ dow ]
+ special_time:
+ description:
+ - Special time specification nickname.
+ type: str
+ choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
+ version_added: "1.3"
+ disabled:
+ description:
+ - If the job should be disabled (commented out) in the crontab.
+ - Only has effect if I(state=present).
+ type: bool
+ default: no
+ version_added: "2.0"
+ env:
+ description:
+ - If set, manages a crontab's environment variable.
+ - New variables are added on top of crontab.
+ - I(name) and I(value) parameters are the name and the value of environment variable.
+ type: bool
+ default: false
+ version_added: "2.1"
+ insertafter:
+ description:
+ - Used with I(state=present) and I(env).
+ - If specified, the environment variable will be inserted after the declaration of specified environment variable.
+ type: str
+ version_added: "2.1"
+ insertbefore:
+ description:
+ - Used with I(state=present) and I(env).
+ - If specified, the environment variable will be inserted before the declaration of specified environment variable.
+ type: str
+ version_added: "2.1"
+requirements:
+ - cron (any 'vixie cron' conformant variant, like cronie)
+author:
+ - Dane Summers (@dsummersl)
+ - Mike Grozak (@rhaido)
+ - Patrick Callahan (@dirtyharrycallahan)
+ - Evan Kaufman (@EvanK)
+ - Luca Berruti (@lberruti)
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ support: full
+ platforms: posix
+'''
+
+EXAMPLES = r'''
+- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * * ls -alh > /dev/null"
+ ansible.builtin.cron:
+ name: "check dirs"
+ minute: "0"
+ hour: "5,2"
+ job: "ls -alh > /dev/null"
+
+- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
+ ansible.builtin.cron:
+ name: "an old job"
+ state: absent
+
+- name: Creates an entry like "@reboot /some/job.sh"
+ ansible.builtin.cron:
+ name: "a job for reboot"
+ special_time: reboot
+ job: "/some/job.sh"
+
+- name: Creates an entry like "PATH=/opt/bin" on top of crontab
+ ansible.builtin.cron:
+ name: PATH
+ env: yes
+ job: /opt/bin
+
+- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
+ ansible.builtin.cron:
+ name: APP_HOME
+ env: yes
+ job: /srv/app
+ insertafter: PATH
+
+- name: Creates a cron file under /etc/cron.d
+ ansible.builtin.cron:
+ name: yum autoupdate
+ weekday: "2"
+ minute: "0"
+ hour: "12"
+ user: root
+ job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
+ cron_file: ansible_yum-autoupdate
+
+- name: Removes a cron file from under /etc/cron.d
+ ansible.builtin.cron:
+ name: "yum autoupdate"
+ cron_file: ansible_yum-autoupdate
+ state: absent
+
+- name: Removes "APP_HOME" environment variable from crontab
+ ansible.builtin.cron:
+ name: APP_HOME
+ env: yes
+ state: absent
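+
+# A sketch of the documented "disabled" option (job name and path are illustrative):
+# the entry is kept in the crontab but commented out
+- name: Keep the nightly report job in the crontab but commented out
+ ansible.builtin.cron:
+ name: "nightly report"
+ minute: "30"
+ hour: "1"
+ job: "/usr/local/bin/report.sh"
+ disabled: yes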
+'''
+
+RETURN = r'''#'''
+
+import os
+import platform
+import pwd
+import re
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronTabError(Exception):
+ pass
+
+
+class CronTab(object):
+ """
+ CronTab object to write a time-based crontab file
+
+ user - the user of the crontab (defaults to current user)
+ cron_file - a cron file under /etc/cron.d, or an absolute path
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.root = (os.getuid() == 0)
+ self.lines = None
+ self.ansible = "#Ansible: "
+ self.n_existing = ''
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.b_cron_file, 'rb')
+ self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
+ self.lines = self.n_existing.splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronTabError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronTabError("Unable to read crontab")
+
+ self.n_existing = out
+
+ lines = out.splitlines()
+ count = 0
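+ # some cron implementations prepend a three-line "DO NOT EDIT THIS FILE" banner to
+ # crontab -l output; drop it from the first lines so it is not written back twice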
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
+ not re.match(r'# \(/tmp/.*installed on.*\)', l) and
+ not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ else:
+ pattern = re.escape(l) + '[\r\n]?'
+ self.n_existing = re.sub(pattern, '', self.n_existing, 1)
+ count += 1
+
+ def is_empty(self):
+ if len(self.lines) == 0:
+ return True
+ else:
+ for line in self.lines:
+ if line.strip():
+ return False
+ return True
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'wb')
+ elif self.cron_file:
+ fileh = open(self.b_cron_file, 'wb')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ os.chmod(path, int('0644', 8))
+ fileh = os.fdopen(filed, 'wb')
+
+ fileh.write(to_bytes(self.render()))
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ # set SELinux permissions
+ if self.module.selinux_enabled() and self.cron_file:
+ self.module.set_default_selinux_context(self.cron_file, False)
+
+ def do_comment(self, name):
+ return "%s%s" % (self.ansible, name)
+
+ def add_job(self, name, job):
+ # Add the comment
+ self.lines.append(self.do_comment(name))
+
+ # Add the job
+ self.lines.append("%s" % (job))
+
+ def update_job(self, name, job):
+ return self._update_job(name, job, self.do_add_job)
+
+ def do_add_job(self, lines, comment, job):
+ lines.append(comment)
+
+ lines.append("%s" % (job))
+
+ def remove_job(self, name):
+ return self._update_job(name, "", self.do_remove_job)
+
+ def do_remove_job(self, lines, comment, job):
+ return None
+
+ def add_env(self, decl, insertafter=None, insertbefore=None):
+ if not (insertafter or insertbefore):
+ self.lines.insert(0, decl)
+ return
+
+ if insertafter:
+ other_name = insertafter
+ elif insertbefore:
+ other_name = insertbefore
+ other_decl = self.find_env(other_name)
+ if len(other_decl) > 0:
+ if insertafter:
+ index = other_decl[0] + 1
+ elif insertbefore:
+ index = other_decl[0]
+ self.lines.insert(index, decl)
+ return
+
+ self.module.fail_json(msg="Variable named '%s' not found." % other_name)
+
+ def update_env(self, name, decl):
+ return self._update_env(name, decl, self.do_add_env)
+
+ def do_add_env(self, lines, decl):
+ lines.append(decl)
+
+ def remove_env(self, name):
+ return self._update_env(name, '', self.do_remove_env)
+
+ def do_remove_env(self, lines, decl):
+ return None
+
+ def remove_job_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronTabError("Unexpected error:", sys.exc_info()[0])
+
+ def find_job(self, name, job=None):
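+ # returns [] when nothing matches; otherwise a [header, job] pair, plus a trailing
+ # True when matched by job text and the "#Ansible:" header was inserted or updated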
+ # attempt to find job by 'Ansible:' header comment
+ comment = None
+ for l in self.lines:
+ if comment is not None:
+ if comment == name:
+ return [comment, l]
+ else:
+ comment = None
+ elif re.match(r'%s' % self.ansible, l):
+ comment = re.sub(r'%s' % self.ansible, '', l)
+
+ # failing that, attempt to find job by exact match
+ if job:
+ for i, l in enumerate(self.lines):
+ if l == job:
+ # if no leading ansible header, insert one
+ if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
+ self.lines.insert(i, self.do_comment(name))
+ return [self.lines[i], l, True]
+ # if a leading blank ansible header AND job has a name, update header
+ elif name and self.lines[i - 1] == self.do_comment(None):
+ self.lines[i - 1] = self.do_comment(name)
+ return [self.lines[i - 1], l, True]
+
+ return []
+
+ def find_env(self, name):
+ for index, l in enumerate(self.lines):
+ if re.match(r'^%s=' % name, l):
+ return [index, l]
+
+ return []
+
+ def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
+ # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
+ job = job.strip('\r\n')
+
+ if disabled:
+ disable_prefix = '#'
+ else:
+ disable_prefix = ''
+
+ if special:
+ if self.cron_file:
+ return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
+ else:
+ return "%s@%s %s" % (disable_prefix, special, job)
+ else:
+ if self.cron_file:
+ return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
+ else:
+ return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
+
+ def get_jobnames(self):
+ jobnames = []
+
+ for l in self.lines:
+ if re.match(r'%s' % self.ansible, l):
+ jobnames.append(re.sub(r'%s' % self.ansible, '', l))
+
+ return jobnames
+
+ def get_envnames(self):
+ envnames = []
+
+ for l in self.lines:
+ if re.match(r'^\S+=', l):
+ envnames.append(l.split('=')[0])
+
+ return envnames
+
+ def _update_job(self, name, job, addlinesfunction):
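+ # addlinesfunction is a callback: do_add_job re-emits the comment/job pair, do_remove_job drops it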
+ ansiblename = self.do_comment(name)
+ newlines = []
+ comment = None
+
+ for l in self.lines:
+ if comment is not None:
+ addlinesfunction(newlines, comment, job)
+ comment = None
+ elif l == ansiblename:
+ comment = l
+ else:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ # TODO add some more error testing
+ return len(newlines) == 0
+
+ def _update_env(self, name, decl, addenvfunction):
+ newlines = []
+
+ for l in self.lines:
+ if re.match(r'^%s=' % name, l):
+ addenvfunction(newlines, decl)
+ else:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render this crontab as it would be in the crontab.
+ """
+ result = '\n'.join(self.lines)
+ if result:
+ result = result.rstrip('\r\n') + '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+def main():
+ # The following example playbooks:
+ #
+ # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
+ #
+ # - name: do the job
+ # cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
+ #
+ # - name: no job
+ # cron: name="an old job" state=absent
+ #
+ # - name: sets env
+ # cron: name="PATH" env=yes value="/bin:/usr/bin"
+ #
+ # Would produce:
+ # PATH=/bin:/usr/bin
+ # # Ansible: check dirs
+ # * 5,2 * * * ls -alh > /dev/null
+ # # Ansible: do the job
+ # * 5,2 * * * /some/dir/job.sh
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ user=dict(type='str'),
+ job=dict(type='str', aliases=['value']),
+ cron_file=dict(type='path'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ backup=dict(type='bool', default=False),
+ minute=dict(type='str', default='*'),
+ hour=dict(type='str', default='*'),
+ day=dict(type='str', default='*', aliases=['dom']),
+ month=dict(type='str', default='*'),
+ weekday=dict(type='str', default='*', aliases=['dow']),
+ special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
+ disabled=dict(type='bool', default=False),
+ env=dict(type='bool', default=False),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['insertafter', 'insertbefore'],
+ ],
+ )
+
+ name = module.params['name']
+ user = module.params['user']
+ job = module.params['job']
+ cron_file = module.params['cron_file']
+ state = module.params['state']
+ backup = module.params['backup']
+ minute = module.params['minute']
+ hour = module.params['hour']
+ day = module.params['day']
+ month = module.params['month']
+ weekday = module.params['weekday']
+ special_time = module.params['special_time']
+ disabled = module.params['disabled']
+ env = module.params['env']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ do_install = state == 'present'
+
+ changed = False
+ res_args = dict()
+ warnings = list()
+
+ if cron_file:
+
+ if cron_file == '/etc/crontab':
+ module.fail_json(msg="Will not manage /etc/crontab via cron_file, see documentation.")
+
+ cron_file_basename = os.path.basename(cron_file)
+ if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
+ warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
+ ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ crontab = CronTab(module, user, cron_file)
+
+ module.debug('cron instantiated - name: "%s"' % name)
+
+ if module._diff:
+ diff = dict()
+ diff['before'] = crontab.n_existing
+ if crontab.cron_file:
+ diff['before_header'] = crontab.cron_file
+ else:
+ if crontab.user:
+ diff['before_header'] = 'crontab for user "%s"' % crontab.user
+ else:
+ diff['before_header'] = 'crontab'
+
+ # --- user input validation ---
+
+ if special_time and any(x != '*' for x in (minute, hour, day, month, weekday)):
+ module.fail_json(msg="You must specify time and date fields or special time.")
+
+ # cannot support special_time on solaris
+ if special_time and platform.system() == 'SunOS':
+ module.fail_json(msg="Solaris does not support special_time=... or @reboot")
+
+ if do_install:
+ if cron_file and not user:
+ module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
+
+ if job is None:
+ module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
+
+ if (insertafter or insertbefore) and not env:
+ module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
+
+ # if requested make a backup before making a change
+ if backup and not module.check_mode:
+ (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
+ crontab.write(backup_file)
+
+ if env:
+ if ' ' in name:
+ module.fail_json(msg="Invalid name for environment variable")
+ decl = '%s="%s"' % (name, job)
+ old_decl = crontab.find_env(name)
+
+ if do_install:
+ if len(old_decl) == 0:
+ crontab.add_env(decl, insertafter, insertbefore)
+ changed = True
+ if len(old_decl) > 0 and old_decl[1] != decl:
+ crontab.update_env(name, decl)
+ changed = True
+ else:
+ if len(old_decl) > 0:
+ crontab.remove_env(name)
+ changed = True
+ else:
+ if do_install:
+ for char in ['\r', '\n']:
+ if char in job.strip('\r\n'):
+ warnings.append('Job should not contain line breaks')
+ break
+
+ job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
+ old_job = crontab.find_job(name, job)
+
+ if len(old_job) == 0:
+ crontab.add_job(name, job)
+ changed = True
+ if len(old_job) > 0 and old_job[1] != job:
+ crontab.update_job(name, job)
+ changed = True
+ if len(old_job) > 2:
+ crontab.update_job(name, job)
+ changed = True
+ else:
+ old_job = crontab.find_job(name)
+
+ if len(old_job) > 0:
+ crontab.remove_job(name)
+ changed = True
+ if crontab.cron_file and crontab.is_empty():
+ if module._diff:
+ diff['after'] = ''
+ diff['after_header'] = '/dev/null'
+ else:
+ diff = dict()
+ if module.check_mode:
+ changed = os.path.isfile(crontab.cron_file)
+ else:
+ changed = crontab.remove_job_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
+
+ # no changes to env/job, but existing crontab needs a terminating newline
+ if not changed and crontab.n_existing != '':
+ if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
+ changed = True
+
+ res_args = dict(
+ jobs=crontab.get_jobnames(),
+ envs=crontab.get_envnames(),
+ warnings=warnings,
+ changed=changed
+ )
+
+ if changed:
+ if not module.check_mode:
+ crontab.write()
+ if module._diff:
+ diff['after'] = crontab.render()
+ if crontab.cron_file:
+ diff['after_header'] = crontab.cron_file
+ else:
+ if crontab.user:
+ diff['after_header'] = 'crontab for user "%s"' % crontab.user
+ else:
+ diff['after_header'] = 'crontab'
+
+ res_args['diff'] = diff
+
+ # retain the backup only if crontab or cron file have changed
+ if backup and not module.check_mode:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+ # --- should never get here
+ module.exit_json(msg="Unable to execute cron task.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/debconf.py b/lib/ansible/modules/debconf.py
new file mode 100644
index 0000000..32f0000
--- /dev/null
+++ b/lib/ansible/modules/debconf.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: debconf
+short_description: Configure a .deb package
+description:
+ - Configure a .deb package using debconf-set-selections.
+ - Or just query existing selections.
+version_added: "1.6"
+extends_documentation_fragment:
+- action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ support: full
+ platforms: debian
+notes:
+ - This module requires the command line debconf tools.
+ - A number of questions have to be answered (depending on the package).
+ Use 'debconf-show <package>' on any Debian or derivative with the package
+ installed to see questions/settings available.
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
+ - It is highly recommended to add I(no_log=True) to the task while handling sensitive information using this module.
+ - The debconf module does not reconfigure packages, it just updates the debconf database.
+ An additional step is needed (typically with I(notify) if debconf makes a change)
+ to reconfigure the package and apply the changes.
+ debconf is extensively used for pre-seeding configuration prior to installation
+ rather than modifying configurations.
+ So, while dpkg-reconfigure does use debconf data, it is not always authoritative
+ and you may need to check how your package is handled.
+ - Also note dpkg-reconfigure is a 3-phase process. It invokes the
+ control scripts from the C(/var/lib/dpkg/info) directory with the
+ C(<package>.prerm reconfigure <version>),
+ C(<package>.config reconfigure <version>) and C(<package>.postinst control <version>) arguments.
+ - The main issue is that the C(<package>.config reconfigure) step for many packages
+ will first reset the debconf database (overriding changes made by this module) by
+ checking the on-disk configuration. If this is the case for your package then
+ dpkg-reconfigure will effectively ignore changes made by debconf.
+ - However as dpkg-reconfigure only executes the C(<package>.config) step if the file
+ exists, it is possible to rename it to C(/var/lib/dpkg/info/<package>.config.ignore)
+ before executing C(dpkg-reconfigure -f noninteractive <package>) and then restore it.
+ This seems to be compliant with Debian policy for the .config file.
+requirements:
+- debconf
+- debconf-utils
+options:
+ name:
+ description:
+ - Name of package to configure.
+ type: str
+ required: true
+ aliases: [ pkg ]
+ question:
+ description:
+ - A debconf configuration setting.
+ type: str
+ aliases: [ selection, setting ]
+ vtype:
+ description:
+ - The type of the value supplied.
+ - It is highly recommended to add I(no_log=True) to the task when specifying I(vtype=password).
+ - C(seen) was added in Ansible 2.2.
+ type: str
+ choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
+ value:
+ description:
+ - Value to set the configuration to.
+ type: str
+ aliases: [ answer ]
+ unseen:
+ description:
+ - Do not set 'seen' flag when pre-seeding.
+ type: bool
+ default: false
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = r'''
+- name: Set default locale to fr_FR.UTF-8
+ ansible.builtin.debconf:
+ name: locales
+ question: locales/default_environment_locale
+ value: fr_FR.UTF-8
+ vtype: select
+
+- name: Set to generate locales
+ ansible.builtin.debconf:
+ name: locales
+ question: locales/locales_to_be_generated
+ value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
+ vtype: multiselect
+
+- name: Accept oracle license
+ ansible.builtin.debconf:
+ name: oracle-java7-installer
+ question: shared/accepted-oracle-license-v1-1
+ value: 'true'
+ vtype: select
+
+- name: Specifying package you can register/return the list of questions and current values
+ ansible.builtin.debconf:
+ name: tzdata
+
+- name: Pre-configure tripwire site passphrase
+ ansible.builtin.debconf:
+ name: tripwire
+ question: tripwire/site-passphrase
+ value: "{{ site_passphrase }}"
+ vtype: password
+ no_log: True
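+
+# A sketch of the "unseen" flag; dash/sh is the standard "use dash as /bin/sh" boolean question
+- name: Pre-seed the dash/sh answer without marking the question as seen
+ ansible.builtin.debconf:
+ name: dash
+ question: dash/sh
+ value: 'true'
+ vtype: boolean
+ unseen: true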
+'''
+
+RETURN = r'''#'''
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_selections(module, pkg):
+ cmd = [module.get_bin_path('debconf-show', True), pkg]
+ # pass the argv list directly so the package name is not re-split by the shell lexer
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ selections = {}
+
+ for line in out.splitlines():
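+ # debconf-show prints lines like "* question: value", where a leading '*' marks a question already seen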
+ (key, value) = line.split(':', 1)
+ selections[key.strip('*').strip()] = value.strip()
+
+ return selections
+
+
+def set_selection(module, pkg, question, vtype, value, unseen):
+ setsel = module.get_bin_path('debconf-set-selections', True)
+ cmd = [setsel]
+ if unseen:
+ cmd.append('-u')
+
+ if vtype == 'boolean':
+ if value == 'True':
+ value = 'true'
+ elif value == 'False':
+ value = 'false'
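+ # debconf-set-selections expects one "<package> <question> <type> <value>" line on stdin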
+ data = ' '.join([pkg, question, vtype, value])
+
+ return module.run_command(cmd, data=data)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['pkg']),
+ question=dict(type='str', aliases=['selection', 'setting']),
+ vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
+ value=dict(type='str', aliases=['answer']),
+ unseen=dict(type='bool', default=False),
+ ),
+ required_together=(['question', 'vtype', 'value'],),
+ supports_check_mode=True,
+ )
+
+ # TODO: enable passing array of options and/or debconf file from get-selections dump
+ pkg = module.params["name"]
+ question = module.params["question"]
+ vtype = module.params["vtype"]
+ value = module.params["value"]
+ unseen = module.params["unseen"]
+
+ prev = get_selections(module, pkg)
+
+ changed = False
+ msg = ""
+
+ if question is not None:
+ if vtype is None or value is None:
+ module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
+
+ # if question doesn't exist, value cannot match
+ if question not in prev:
+ changed = True
+ else:
+
+ existing = prev[question]
+
+ # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
+ if vtype == 'boolean':
+ value = to_text(value).lower()
+ existing = to_text(prev[question]).lower()
+
+ if value != existing:
+ changed = True
+
+ if changed:
+ if not module.check_mode:
+ rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
+ if rc:
+ module.fail_json(msg=e)
+
+ curr = {question: value}
+ if question in prev:
+ prev = {question: prev[question]}
+ else:
+ prev[question] = ''
+ if module._diff:
+ after = prev.copy()
+ after.update(curr)
+ diff_dict = {'before': prev, 'after': after}
+ else:
+ diff_dict = {}
+
+ module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
+
+ module.exit_json(changed=changed, msg=msg, current=prev)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/debug.py b/lib/ansible/modules/debug.py
new file mode 100644
index 0000000..b275a20
--- /dev/null
+++ b/lib/ansible/modules/debug.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: debug
+short_description: Print statements during execution
+description:
+- This module prints statements during execution and can be useful
+ for debugging variables or expressions without necessarily halting
+ the playbook.
+- Useful for debugging together with the 'when:' directive.
+- This module is also supported for Windows targets.
+version_added: '0.8'
+options:
+ msg:
+ description:
+ - The customized message that is printed. If omitted, prints a generic message.
+ type: str
+ default: 'Hello world!'
+ var:
+ description:
+ - A variable name to debug.
+ - Mutually exclusive with the C(msg) option.
+ - Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
+ so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
+ type: str
+ verbosity:
+ description:
+ - A number that controls when the debug is run; if you set it to 3, debug only runs when -vvv or above is used.
+ type: int
+ default: 0
+ version_added: '2.1'
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.conn
+- action_common_attributes.flow
+
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ become:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ connection:
+ support: none
+ delegation:
+ details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
+ support: partial
+ platform:
+ support: full
+ platforms: all
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.fail
+author:
+- Dag Wieers (@dagwieers)
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Print the gateway for each host when defined
+ ansible.builtin.debug:
+ msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
+ when: ansible_default_ipv4.gateway is defined
+
+- name: Get uptime information
+ ansible.builtin.shell: /usr/bin/uptime
+ register: result
+
+- name: Print return information from the previous task
+ ansible.builtin.debug:
+ var: result
+ verbosity: 2
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname]
+ verbosity: 4
+
+- name: Prints two lines of messages, but only if there is an environment value set
+ ansible.builtin.debug:
+ msg:
+ - "Provisioning based on YOUR_KEY which is: {{ lookup('ansible.builtin.env', 'YOUR_KEY') }}"
+ - "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
+'''
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
new file mode 100644
index 0000000..8131833
--- /dev/null
+++ b/lib/ansible/modules/dnf.py
@@ -0,0 +1,1468 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Cristian van Ee <cristian at cvee.org>
+# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
+# Copyright 2018 Adam Miller <admiller@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnf
+version_added: 1.9
+short_description: Manages packages with the I(dnf) package manager
+description:
+ - Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
+options:
+ name:
+ description:
+ - "A package name or package specifier with version, like C(name-1.0).
+ When using state=latest, this can be '*' which means run: dnf -y update.
+ You can also pass a url or a local path to a rpm file.
+ To operate on several packages this can accept a comma separated string of packages or a list of packages."
+ - Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0).
+ Spaces around the operator are required.
+ - You can also pass an absolute path for a binary which is provided by the package to install.
+ See examples for more information.
+ required: true
+ aliases:
+ - pkg
+ type: list
+ elements: str
+
+ list:
+ description:
+ - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks.
+ Use M(ansible.builtin.package_facts) instead of the C(list) argument as a best practice.
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ - Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
+ enabled for this module, then C(absent) is inferred.
+ choices: ['absent', 'present', 'installed', 'removed', 'latest']
+ type: str
+
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+ type: list
+ elements: str
+
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+ type: list
+ elements: str
+
+ conf_file:
+ description:
+ - The remote dnf configuration file to use for the transaction.
+ type: str
+
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ - This setting affects packages installed from a repository as well as
+ "local" packages installed from the filesystem or a URL.
+ type: bool
+ default: 'no'
+
+ installroot:
+ description:
+ - Specifies an alternative installroot, relative to which all packages
+ will be installed.
+ version_added: "2.3"
+ default: "/"
+ type: str
+
+ releasever:
+ description:
+ - Specifies an alternative release from which all packages will be
+ installed.
+ version_added: "2.6"
+ type: str
+
+ autoremove:
+ description:
+ - If C(true), removes all "leaf" packages from the system that were originally
+ installed as dependencies of user-installed packages but which are no longer
+ required by any such package. Should be used alone or when state is I(absent).
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ exclude:
+ description:
+ - Package name(s) to exclude when state=present, or latest. This can be a
+ list or a comma separated string.
+ version_added: "2.7"
+ type: list
+ elements: str
+ skip_broken:
+ description:
+ - Skip all unavailable packages or packages with broken dependencies
+ without raising an error. Equivalent to passing the --skip-broken option.
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ update_cache:
+ description:
+ - Force dnf to check if cache is out of date and redownload if needed.
+ Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ aliases: [ expire-cache ]
+ version_added: "2.7"
+ update_only:
+ description:
+ - When using latest, only update installed packages. Do not install packages.
+ - Has an effect only if state is I(latest)
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ security:
+ description:
+ - If set to C(true), and C(state=latest) then only installs updates that have been marked security related.
+ - Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ bugfix:
+ description:
+ - If set to C(true), and C(state=latest) then only installs updates that have been marked bugfix related.
+ - Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ enable_plugin:
+ description:
+ - I(Plugin) name to enable for the install/update operation.
+ The enabled plugin will not persist beyond the transaction.
+ version_added: "2.7"
+ type: list
+ elements: str
+ disable_plugin:
+ description:
+ - I(Plugin) name to disable for the install/update operation.
+ The disabled plugins will not persist beyond the transaction.
+ version_added: "2.7"
+ type: list
+ elements: str
+ disable_excludes:
+ description:
+ - Disable the excludes defined in DNF config files.
+ - If set to C(all), disables all excludes.
+ - If set to C(main), disable excludes defined in [main] in dnf.conf.
+ - If set to C(repoid), disable excludes defined for given repo id.
+ version_added: "2.7"
+ type: str
+ validate_certs:
+ description:
+ - This only applies if using an https url as the source of the rpm, e.g. for localinstall. If set to C(false), the SSL certificates will not be validated.
+ - This should only be set to C(false) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
+ type: bool
+ default: "yes"
+ version_added: "2.7"
+ sslverify:
+ description:
+ - Disables SSL validation of the repository server for this transaction.
+ - This should be set to C(false) if one of the configured repositories is using an untrusted or self-signed certificate.
+ type: bool
+ default: "yes"
+ version_added: "2.13"
+ allow_downgrade:
+ description:
+ - Specify if the named package and version is allowed to downgrade
+ a maybe already installed higher version of that package.
+ Note that setting allow_downgrade=True can make this module
+ behave in a non-idempotent way. The task could end up with a set
+ of packages that does not match the complete list of specified
+ packages to install (because dependencies between the downgraded
+ package and others can cause changes to the packages which were
+ in the earlier transaction).
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ install_repoquery:
+ description:
+ - This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
+ parity/compatibility with the I(yum) module.
+ type: bool
+ default: "yes"
+ version_added: "2.7"
+ download_only:
+ description:
+ - Only download the packages, do not install them.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ lock_timeout:
+ description:
+ - Amount of time to wait for the dnf lockfile to be freed.
+ required: false
+ default: 30
+ type: int
+ version_added: "2.8"
+ install_weak_deps:
+ description:
+ - Will also install all packages linked by a weak dependency relation.
+ type: bool
+ default: "yes"
+ version_added: "2.8"
+ download_dir:
+ description:
+ - Specifies an alternate directory to store packages.
+ - Has an effect only if I(download_only) is specified.
+ type: str
+ version_added: "2.8"
+ allowerasing:
+ description:
+ - If C(true) it allows erasing of installed packages to resolve dependencies.
+ required: false
+ type: bool
+ default: "no"
+ version_added: "2.10"
+ nobest:
+ description:
+ - Set best option to False, so that transactions are not limited to best candidates only.
+ required: false
+ type: bool
+ default: "no"
+ version_added: "2.11"
+ cacheonly:
+ description:
+ - Tells dnf to run entirely from system cache; does not download or update metadata.
+ type: bool
+ default: "no"
+ version_added: "2.12"
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+attributes:
+ action:
+ details: In the case of dnf, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
+ support: partial
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: rhel
+notes:
+ - When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
+ - Group removal doesn't work if the group was installed with Ansible because
+ upstream dnf's API doesn't properly mark groups as installed, therefore upon
+ removal the module is unable to detect that the group is installed
+ (https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
+requirements:
+ - "python >= 2.6"
+ - python-dnf
+ - for the autoremove option you need dnf >= 2.0.1
+author:
+ - Igor Gnatenko (@ignatenkobrain) <i.gnatenko.brain@gmail.com>
+ - Cristian van Ee (@DJMuggs) <cristian at cvee.org>
+ - Berend De Schouwer (@berenddeschouwer)
+ - Adam Miller (@maxamillion) <admiller@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: Install the latest version of Apache
+ ansible.builtin.dnf:
+ name: httpd
+ state: latest
+
+- name: Install Apache >= 2.4
+ ansible.builtin.dnf:
+ name: httpd >= 2.4
+ state: present
+
+- name: Install the latest version of Apache and MariaDB
+ ansible.builtin.dnf:
+ name:
+ - httpd
+ - mariadb-server
+ state: latest
+
+- name: Remove the Apache package
+ ansible.builtin.dnf:
+ name: httpd
+ state: absent
+
+- name: Install the latest version of Apache from the testing repo
+ ansible.builtin.dnf:
+ name: httpd
+ enablerepo: testing
+ state: present
+
+- name: Upgrade all packages
+ ansible.builtin.dnf:
+ name: "*"
+ state: latest
+
+- name: Update the webserver, depending on which is installed on the system. Do not install the other one
+ ansible.builtin.dnf:
+ name:
+ - httpd
+ - nginx
+ state: latest
+ update_only: yes
+
+- name: Install the nginx rpm from a remote repo
+ ansible.builtin.dnf:
+ name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
+ state: present
+
+- name: Install nginx rpm from a local file
+ ansible.builtin.dnf:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install Package based upon the file it provides
+ ansible.builtin.dnf:
+ name: /usr/bin/cowsay
+ state: present
+
+- name: Install the 'Development tools' package group
+ ansible.builtin.dnf:
+ name: '@Development tools'
+ state: present
+
+- name: Autoremove unneeded packages installed as dependencies
+ ansible.builtin.dnf:
+ autoremove: yes
+
+- name: Uninstall httpd but keep its dependencies
+ ansible.builtin.dnf:
+ name: httpd
+ state: absent
+ autoremove: no
+
+- name: Install a modularity appstream with defined stream and profile
+ ansible.builtin.dnf:
+ name: '@postgresql:9.6/client'
+ state: present
+
+- name: Install a modularity appstream with defined stream
+ ansible.builtin.dnf:
+ name: '@postgresql:9.6'
+ state: present
+
+- name: Install a modularity appstream with defined profile
+ ansible.builtin.dnf:
+ name: '@postgresql/client'
+ state: present
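+
+# The two tasks below are illustrative sketches for the DNF-only options
+# documented above (allowerasing, cacheonly); the package specs are placeholders.
+- name: Install a package, allowing erasure of conflicting installed packages
+  ansible.builtin.dnf:
+    name: httpd
+    state: present
+    allowerasing: true
+
+- name: Upgrade all packages using only the local cache, without refreshing metadata
+  ansible.builtin.dnf:
+    name: "*"
+    state: latest
+    cacheonly: true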
+'''
+
+import os
+import re
+import sys
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_file
+from ansible.module_utils.six import PY2, text_type
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
+
+# NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
+# because we need AnsibleModule object to use get_best_parsable_locale()
+# to set proper locale before importing dnf to be able to scrape
+# the output in some cases (FIXME?).
+dnf = None
+
+
+class DnfModule(YumDnf):
+ """
+ DNF Ansible module back-end implementation
+ """
+
+ def __init__(self, module):
+ # This populates instance vars for all argument spec params
+ super(DnfModule, self).__init__(module)
+
+ self._ensure_dnf()
+ self.lockfile = "/var/cache/dnf/*_lock.pid"
+ self.pkg_mgr_name = "dnf"
+
+ try:
+ self.with_modules = dnf.base.WITH_MODULES
+ except AttributeError:
+ self.with_modules = False
+
+ # DNF specific args that are not part of YumDnf
+ self.allowerasing = self.module.params['allowerasing']
+ self.nobest = self.module.params['nobest']
+
+ def is_lockfile_pid_valid(self):
+ # FIXME? it looks like DNF takes care of invalid lock files itself?
+ # https://github.com/ansible/ansible/issues/57189
+ return True
+
+ def _sanitize_dnf_error_msg_install(self, spec, error):
+ """
+ For unhandled dnf.exceptions.Error scenarios, there are certain error
+ messages we want to filter in an install scenario. Do that here.
+ """
+ if (
+ to_text("no package matched") in to_text(error) or
+ to_text("No match for argument:") in to_text(error)
+ ):
+ return "No package {0} available.".format(spec)
+
+ return error
+
+ def _sanitize_dnf_error_msg_remove(self, spec, error):
+ """
+ For unhandled dnf.exceptions.Error scenarios, there are certain error
+ messages we want to ignore in a removal scenario as known benign
+ failures. Do that here.
+ """
+ if (
+ 'no package matched' in to_native(error) or
+ 'No match for argument:' in to_native(error)
+ ):
+ return (False, "{0} is not installed".format(spec))
+
+ # Return value is tuple of:
+ # ("Is this actually a failure?", "Error Message")
+ return (True, error)
+
+ def _package_dict(self, package):
+ """Return a dictionary of information for the package."""
+ # NOTE: This no longer contains the 'dnfstate' field because it is
+ # already known based on the query type.
+ result = {
+ 'name': package.name,
+ 'arch': package.arch,
+ 'epoch': str(package.epoch),
+ 'release': package.release,
+ 'version': package.version,
+ 'repo': package.repoid}
+
+ # envra format for alignment with the yum module
+ result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result)
+
+ # keep nevra key for backwards compat as it was previously
+ # defined with a value in envra format
+ result['nevra'] = result['envra']
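+        # e.g. (sketch): both keys end up looking like '0:bash-4.4.19-7.el8.x86_64'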
+
+ if package.installtime == 0:
+ result['yumstate'] = 'available'
+ else:
+ result['yumstate'] = 'installed'
+
+ return result
+
+ def _split_package_arch(self, packagename):
+ # This list was auto generated on a Fedora 28 system with the following one-liner
+ # printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
+ redhat_rpm_arches = [
+ "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
+ "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
+ "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
+ "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
+ "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
+ "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
+ "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
+ "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
+ "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
+ ]
+
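+        # Behavior sketch: 'httpd.x86_64' -> ('httpd', 'x86_64'); a name without a
+        # recognized arch suffix, e.g. 'httpd', -> ('httpd', None)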
+ name, delimiter, arch = packagename.rpartition('.')
+ if name and arch and arch in redhat_rpm_arches:
+ return name, arch
+ return packagename, None
+
+ def _packagename_dict(self, packagename):
+ """
+ Return a dictionary of information for a package name string or None
+ if the package name doesn't contain at least all NVR elements
+ """
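+        # Behavior sketch: 'bash-4.4.19-7.el8.x86_64' ->
+        # {'name': 'bash', 'epoch': '0', 'version': '4.4.19', 'release': '7.el8'}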
+
+        if packagename.endswith('.rpm'):
+ packagename = packagename[:-4]
+
+ rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
+ try:
+ arch = None
+ nevr, arch = self._split_package_arch(packagename)
+ if arch:
+ packagename = nevr
+ rpm_nevr_match = rpm_nevr_re.match(packagename)
+ if rpm_nevr_match:
+                name, epoch, version, release = rpm_nevr_match.groups()
+ if not version or not version.split('.')[0].isdigit():
+ return None
+ else:
+ return None
+ except AttributeError as e:
+ self.module.fail_json(
+ msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
+ rc=1,
+ results=[]
+ )
+
+ if not epoch:
+ epoch = "0"
+
+ if ':' in name:
+ epoch_name = name.split(":")
+
+ epoch = epoch_name[0]
+ name = ''.join(epoch_name[1:])
+
+ result = {
+ 'name': name,
+ 'epoch': epoch,
+ 'release': release,
+ 'version': version,
+ }
+
+ return result
+
+ # Original implementation from yum.rpmUtils.miscutils (GPLv2+)
+ # http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
+ def _compare_evr(self, e1, v1, r1, e2, v2, r2):
+ # return 1: a is newer than b
+ # 0: a and b are the same version
+ # -1: b is newer than a
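+        # e.g. (sketch): self._compare_evr('0', '1.0', '1', '0', '1.0', '2') == -1,
+        # since release 2 is newer than release 1 for an equal epoch and version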
+ if e1 is None:
+ e1 = '0'
+ else:
+ e1 = str(e1)
+ v1 = str(v1)
+ r1 = str(r1)
+ if e2 is None:
+ e2 = '0'
+ else:
+ e2 = str(e2)
+ v2 = str(v2)
+ r2 = str(r2)
+ rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
+ return rc
+
+ def _ensure_dnf(self):
+ locale = get_best_parsable_locale(self.module)
+ os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
+ os.environ['LANGUAGE'] = os.environ['LANG'] = locale
+
+ global dnf
+ try:
+ import dnf
+ import dnf.cli
+ import dnf.const
+ import dnf.exceptions
+ import dnf.subject
+ import dnf.util
+ HAS_DNF = True
+ except ImportError:
+ HAS_DNF = False
+
+ if HAS_DNF:
+ return
+
+ system_interpreters = ['/usr/libexec/platform-python',
+ '/usr/bin/python3',
+ '/usr/bin/python2',
+ '/usr/bin/python']
+
+ if not has_respawned():
+ # probe well-known system Python locations for accessible bindings, favoring py3
+ interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
+
+ if interpreter:
+ # respawn under the interpreter where the bindings should be found
+ respawn_module(interpreter)
+ # end of the line for this module, the process will exit here once the respawned module completes
+
+ # done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
+ self.module.fail_json(
+ msg="Could not import the dnf python module using {0} ({1}). "
+ "Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
+ "correct ansible_python_interpreter. (attempted {2})"
+ .format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
+ results=[]
+ )
+
+ def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/', sslverify=True):
+ """Configure the dnf Base object."""
+
+ conf = base.conf
+
+ # Change the configuration file path if provided, this must be done before conf.read() is called
+ if conf_file:
+ # Fail if we can't read the configuration file.
+ if not os.access(conf_file, os.R_OK):
+ self.module.fail_json(
+ msg="cannot read configuration file", conf_file=conf_file,
+ results=[],
+ )
+ else:
+ conf.config_file_path = conf_file
+
+ # Read the configuration file
+ conf.read()
+
+ # Turn off debug messages in the output
+ conf.debuglevel = 0
+
+ # Set whether to check gpg signatures
+ conf.gpgcheck = not disable_gpg_check
+ conf.localpkg_gpgcheck = not disable_gpg_check
+
+ # Don't prompt for user confirmations
+ conf.assumeyes = True
+
+ # Set certificate validation
+ conf.sslverify = sslverify
+
+ # Set installroot
+ conf.installroot = installroot
+
+ # Load substitutions from the filesystem
+ conf.substitutions.update_from_etc(installroot)
+
+        # Handle the immutable/mutable datatypes that differ across DNF
+        # versions (dnf v1/v2/v3)
+        #
+        # In DNF < 3.0 these are lists, and modifying them works
+        # In DNF >= 3.0, < 3.6 these are lists, but modifying them doesn't work
+        # In DNF >= 3.6 they have been turned into tuples, to communicate that modifying them doesn't work
+ #
+ # https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
+ #
+ # Set excludes
+ if self.exclude:
+ _excludes = list(conf.exclude)
+ _excludes.extend(self.exclude)
+ conf.exclude = _excludes
+ # Set disable_excludes
+ if self.disable_excludes:
+ _disable_excludes = list(conf.disable_excludes)
+ if self.disable_excludes not in _disable_excludes:
+ _disable_excludes.append(self.disable_excludes)
+ conf.disable_excludes = _disable_excludes
+
+ # Set releasever
+ if self.releasever is not None:
+ conf.substitutions['releasever'] = self.releasever
+
+ if conf.substitutions.get('releasever') is None:
+ self.module.warn(
+ 'Unable to detect release version (use "releasever" option to specify release version)'
+ )
+ # values of conf.substitutions are expected to be strings
+ # setting this to an empty string instead of None appears to mimic the DNF CLI behavior
+ conf.substitutions['releasever'] = ''
+
+ # Set skip_broken (in dnf this is strict=0)
+ if self.skip_broken:
+ conf.strict = 0
+
+ # Set best
+ if self.nobest:
+ conf.best = 0
+
+ if self.download_only:
+ conf.downloadonly = True
+ if self.download_dir:
+ conf.destdir = self.download_dir
+
+ if self.cacheonly:
+ conf.cacheonly = True
+
+ # Default in dnf upstream is true
+ conf.clean_requirements_on_remove = self.autoremove
+
+ # Default in dnf (and module default) is True
+ conf.install_weak_deps = self.install_weak_deps
+
+ def _specify_repositories(self, base, disablerepo, enablerepo):
+ """Enable and disable repositories matching the provided patterns."""
+ base.read_all_repos()
+ repos = base.repos
+
+ # Disable repositories
+ for repo_pattern in disablerepo:
+ if repo_pattern:
+ for repo in repos.get_matching(repo_pattern):
+ repo.disable()
+
+ # Enable repositories
+ for repo_pattern in enablerepo:
+ if repo_pattern:
+ for repo in repos.get_matching(repo_pattern):
+ repo.enable()
+
+ def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify):
+ """Return a fully configured dnf Base object."""
+ base = dnf.Base()
+ self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
+ try:
+ # this method has been supported in dnf-4.2.17-6 or later
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1788212
+ base.setup_loggers()
+ except AttributeError:
+ pass
+ try:
+ base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
+ base.pre_configure_plugins()
+ except AttributeError:
+ pass # older versions of dnf didn't require this and don't have these methods
+ self._specify_repositories(base, disablerepo, enablerepo)
+ try:
+ base.configure_plugins()
+ except AttributeError:
+ pass # older versions of dnf didn't require this and don't have these methods
+
+ try:
+ if self.update_cache:
+ try:
+ base.update_cache()
+ except dnf.exceptions.RepoError as e:
+ self.module.fail_json(
+ msg="{0}".format(to_text(e)),
+ results=[],
+ rc=1
+ )
+ base.fill_sack(load_system_repo='auto')
+ except dnf.exceptions.RepoError as e:
+ self.module.fail_json(
+ msg="{0}".format(to_text(e)),
+ results=[],
+ rc=1
+ )
+
+ add_security_filters = getattr(base, "add_security_filters", None)
+ if callable(add_security_filters):
+ filters = {}
+ if self.bugfix:
+ filters.setdefault('types', []).append('bugfix')
+ if self.security:
+ filters.setdefault('types', []).append('security')
+ if filters:
+ add_security_filters('eq', **filters)
+ else:
+ filters = []
+ if self.bugfix:
+ key = {'advisory_type__eq': 'bugfix'}
+ filters.append(base.sack.query().upgrades().filter(**key))
+ if self.security:
+ key = {'advisory_type__eq': 'security'}
+ filters.append(base.sack.query().upgrades().filter(**key))
+ if filters:
+ base._update_security_filters = filters
+
+ return base
+
+ def list_items(self, command):
+ """List package info based on the command."""
+ # Rename updates to upgrades
+ if command == 'updates':
+ command = 'upgrades'
+
+ # Return the corresponding packages
+ if command in ['installed', 'upgrades', 'available']:
+ results = [
+ self._package_dict(package)
+ for package in getattr(self.base.sack.query(), command)()]
+ # Return the enabled repository ids
+ elif command in ['repos', 'repositories']:
+ results = [
+ {'repoid': repo.id, 'state': 'enabled'}
+ for repo in self.base.repos.iter_enabled()]
+ # Return any matching packages
+ else:
+ packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
+ results = [self._package_dict(package) for package in packages]
+
+ self.module.exit_json(msg="", results=results)
+
+ def _is_installed(self, pkg):
+ installed = self.base.sack.query().installed()
+
+ package_spec = {}
+ name, arch = self._split_package_arch(pkg)
+ if arch:
+ package_spec['arch'] = arch
+
+ package_details = self._packagename_dict(pkg)
+ if package_details:
+ package_details['epoch'] = int(package_details['epoch'])
+ package_spec.update(package_details)
+ else:
+ package_spec['name'] = name
+
+ return bool(installed.filter(**package_spec))
+
+ def _is_newer_version_installed(self, pkg_name):
+ candidate_pkg = self._packagename_dict(pkg_name)
+ if not candidate_pkg:
+ # The user didn't provide a versioned rpm, so version checking is
+ # not required
+ return False
+
+ installed = self.base.sack.query().installed()
+ installed_pkg = installed.filter(name=candidate_pkg['name']).run()
+ if installed_pkg:
+ installed_pkg = installed_pkg[0]
+
+ # this looks weird but one is a dict and the other is a dnf.Package
+ evr_cmp = self._compare_evr(
+ installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
+ candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
+ )
+
+ return evr_cmp == 1
+ else:
+ return False
+
+ def _mark_package_install(self, pkg_spec, upgrade=False):
+ """Mark the package for install."""
+ is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
+ is_installed = self._is_installed(pkg_spec)
+ try:
+ if is_newer_version_installed:
+ if self.allow_downgrade:
+ # dnf only does allow_downgrade, we have to handle this ourselves
+ # because it allows a possibility for non-idempotent transactions
+ # on a system's package set (pending the yum repo has many old
+ # NVRs indexed)
+ if upgrade:
+ if is_installed: # Case 1
+ # TODO: Is this case reachable?
+ #
+ # _is_installed() demands a name (*not* NVR) or else is always False
+ # (wildcards are treated literally).
+ #
+ # Meanwhile, _is_newer_version_installed() demands something versioned
+ # or else is always false.
+ #
+ # I fail to see how they can both be true at the same time for any
+ # given pkg_spec. -re
+ self.base.upgrade(pkg_spec)
+ else: # Case 2
+ self.base.install(pkg_spec, strict=self.base.conf.strict)
+ else: # Case 3
+ self.base.install(pkg_spec, strict=self.base.conf.strict)
+ else: # Case 4, Nothing to do, report back
+ pass
+ elif is_installed: # A potentially older (or same) version is installed
+ if upgrade: # Case 5
+ self.base.upgrade(pkg_spec)
+ else: # Case 6, Nothing to do, report back
+ pass
+ else: # Case 7, The package is not installed, simply install it
+ self.base.install(pkg_spec, strict=self.base.conf.strict)
+
+ return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
+
+ except dnf.exceptions.MarkingError as e:
+ return {
+ 'failed': True,
+ 'msg': "No package {0} available.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ except dnf.exceptions.DepsolveError as e:
+ return {
+ 'failed': True,
+ 'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ except dnf.exceptions.Error as e:
+ if to_text("already installed") in to_text(e):
+ return {'failed': False, 'msg': '', 'failure': ''}
+ else:
+ return {
+ 'failed': True,
+ 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ def _whatprovides(self, filepath):
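+        # Behavior sketch: return the name of the first available package that
+        # owns or provides the given path (e.g. '/usr/bin/cowsay' -> 'cowsay'),
+        # or None when nothing matches.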
+ self.base.read_all_repos()
+ available = self.base.sack.query().available()
+ # Search in file
+ files_filter = available.filter(file=filepath)
+ # And Search in provides
+ pkg_spec = files_filter.union(available.filter(provides=filepath)).run()
+
+ if pkg_spec:
+ return pkg_spec[0].name
+
+ def _parse_spec_group_file(self):
+ pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
+ already_loaded_comps = False # Only load this if necessary, it's slow
+
+ for name in self.names:
+ if '://' in name:
+ name = fetch_file(self.module, name)
+ filenames.append(name)
+ elif name.endswith(".rpm"):
+ filenames.append(name)
+ elif name.startswith('/'):
+ # like "dnf install /usr/bin/vi"
+ pkg_spec = self._whatprovides(name)
+ if pkg_spec:
+ pkg_specs.append(pkg_spec)
+ continue
+ elif name.startswith("@") or ('/' in name):
+ if not already_loaded_comps:
+ self.base.read_comps()
+ already_loaded_comps = True
+
+ grp_env_mdl_candidate = name[1:].strip()
+
+ if self.with_modules:
+ mdl = self.module_base._get_modules(grp_env_mdl_candidate)
+ if mdl[0]:
+ module_specs.append(grp_env_mdl_candidate)
+ else:
+ grp_specs.append(grp_env_mdl_candidate)
+ else:
+ grp_specs.append(grp_env_mdl_candidate)
+ else:
+ pkg_specs.append(name)
+ return pkg_specs, grp_specs, module_specs, filenames
+
+ def _update_only(self, pkgs):
+ not_installed = []
+ for pkg in pkgs:
+ if self._is_installed(pkg):
+ try:
+ if isinstance(to_text(pkg), text_type):
+ self.base.upgrade(pkg)
+ else:
+ self.base.package_upgrade(pkg)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
+ else:
+ not_installed.append(pkg)
+
+ return not_installed
+
+ def _install_remote_rpms(self, filenames):
+ if int(dnf.__version__.split(".")[0]) >= 2:
+ pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
+ else:
+ pkgs = []
+ try:
+ for filename in filenames:
+ pkgs.append(self.base.add_remote_rpm(filename))
+ except IOError as e:
+ if to_text("Can not load RPM file") in to_text(e):
+ self.module.fail_json(
+ msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
+ results=[],
+ rc=1,
+ )
+ if self.update_only:
+ self._update_only(pkgs)
+ else:
+ for pkg in pkgs:
+ try:
+ if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
+ if self.allow_downgrade:
+ self.base.package_install(pkg, strict=self.base.conf.strict)
+ else:
+ self.base.package_install(pkg, strict=self.base.conf.strict)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
+
+ def _is_module_installed(self, module_spec):
+ if self.with_modules:
+ module_spec = module_spec.strip()
+ module_list, nsv = self.module_base._get_modules(module_spec)
+ enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
+
+ if enabled_streams:
+ if nsv.stream:
+ if nsv.stream in enabled_streams:
+ return True # The provided stream was found
+ else:
+ return False # The provided stream was not found
+ else:
+ return True # No stream provided, but module found
+
+ return False # seems like a sane default
+
+ def ensure(self):
+
+ response = {
+ 'msg': "",
+ 'changed': False,
+ 'results': [],
+ 'rc': 0
+ }
+
+ # Accumulate failures. Package management modules install what they can
+ # and fail with a message about what they can't.
+ failure_response = {
+ 'msg': "",
+ 'failures': [],
+ 'results': [],
+ 'rc': 1
+ }
+
+ # Autoremove is called alone
+ # Jump to remove path where base.autoremove() is run
+ if not self.names and self.autoremove:
+ self.names = []
+ self.state = 'absent'
+
+ if self.names == ['*'] and self.state == 'latest':
+ try:
+ self.base.upgrade_all()
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
+ self.module.fail_json(**failure_response)
+ else:
+ pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
+
+ pkg_specs = [p.strip() for p in pkg_specs]
+ filenames = [f.strip() for f in filenames]
+ groups = []
+ environments = []
+ for group_spec in (g.strip() for g in group_specs):
+ group = self.base.comps.group_by_pattern(group_spec)
+ if group:
+ groups.append(group.id)
+ else:
+ environment = self.base.comps.environment_by_pattern(group_spec)
+ if environment:
+ environments.append(environment.id)
+ else:
+ self.module.fail_json(
+ msg="No group {0} available.".format(group_spec),
+ results=[],
+ )
+
+ if self.state in ['installed', 'present']:
+ # Install files.
+ self._install_remote_rpms(filenames)
+ for filename in filenames:
+ response['results'].append("Installed {0}".format(filename))
+
+ # Install modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if not self._is_module_installed(module):
+ response['results'].append("Module {0} installed.".format(module))
+ self.module_base.install([module])
+ self.module_base.enable([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ # Install groups.
+ for group in groups:
+ try:
+ group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ if group_pkg_count_installed == 0:
+ response['results'].append("Group {0} already installed.".format(group))
+ else:
+ response['results'].append("Group {0} installed.".format(group))
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ # In dnf 2.0 if all the mandatory packages in a group do
+ # not install, an error is raised. We want to capture
+ # this but still install as much as possible.
+ failure_response['failures'].append(" ".join((group, to_native(e))))
+
+ for environment in environments:
+ try:
+ self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((environment, to_native(e))))
+
+ if module_specs and not self.with_modules:
+ # This means that the group or env wasn't found in comps
+ self.module.fail_json(
+ msg="No group {0} available.".format(module_specs[0]),
+ results=[],
+ )
+
+ # Install packages.
+ if self.update_only:
+ not_installed = self._update_only(pkg_specs)
+ for spec in not_installed:
+ response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ else:
+ for pkg_spec in pkg_specs:
+ install_result = self._mark_package_install(pkg_spec)
+ if install_result['failed']:
+ if install_result['msg']:
+ failure_response['msg'] += install_result['msg']
+ failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
+ else:
+ if install_result['msg']:
+ response['results'].append(install_result['msg'])
+
+ elif self.state == 'latest':
+ # "latest" is same as "installed" for filenames.
+ self._install_remote_rpms(filenames)
+ for filename in filenames:
+ response['results'].append("Installed {0}".format(filename))
+
+ # Upgrade modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if self._is_module_installed(module):
+ response['results'].append("Module {0} upgraded.".format(module))
+ self.module_base.upgrade([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ for group in groups:
+ try:
+ try:
+ self.base.group_upgrade(group)
+ response['results'].append("Group {0} upgraded.".format(group))
+ except dnf.exceptions.CompsError:
+ if not self.update_only:
+ # If not already installed, try to install.
+ group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ if group_pkg_count_installed == 0:
+ response['results'].append("Group {0} already installed.".format(group))
+ else:
+ response['results'].append("Group {0} installed.".format(group))
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((group, to_native(e))))
+
+ for environment in environments:
+ try:
+ try:
+ self.base.environment_upgrade(environment)
+ except dnf.exceptions.CompsError:
+ # If not already installed, try to install.
+ self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((environment, to_native(e))))
+
+ if self.update_only:
+ not_installed = self._update_only(pkg_specs)
+ for spec in not_installed:
+ response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ else:
+ for pkg_spec in pkg_specs:
+ # Previously we forced base.conf.best=True here.
+ # However in 2.11+ there is a self.nobest option, so defer to that.
+ # Note, however, that just because nobest isn't set, doesn't mean that
+ # base.conf.best is actually true. We only force it false in
+ # _configure_base(), we never set it to true, and it can default to false.
+ # Thus, we still need to explicitly set it here.
+ self.base.conf.best = not self.nobest
+ install_result = self._mark_package_install(pkg_spec, upgrade=True)
+ if install_result['failed']:
+ if install_result['msg']:
+ failure_response['msg'] += install_result['msg']
+ failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
+ else:
+ if install_result['msg']:
+ response['results'].append(install_result['msg'])
+
+ else:
+ # state == absent
+ if filenames:
+ self.module.fail_json(
+ msg="Cannot remove paths -- please specify package name.",
+ results=[],
+ )
+
+ # Remove modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if self._is_module_installed(module):
+ response['results'].append("Module {0} removed.".format(module))
+ self.module_base.remove([module])
+ self.module_base.disable([module])
+ self.module_base.reset([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ for group in groups:
+ try:
+ self.base.group_remove(group)
+ except dnf.exceptions.CompsError:
+ # Group is already uninstalled.
+ pass
+ except AttributeError:
+ # Group either isn't installed or wasn't marked installed at install time
+ # because of DNF bug
+ #
+ # This is necessary until the upstream dnf API bug is fixed where installing
+ # a group via the dnf API doesn't actually mark the group as installed
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1620324
+ pass
+
+ for environment in environments:
+ try:
+ self.base.environment_remove(environment)
+ except dnf.exceptions.CompsError:
+ # Environment is already uninstalled.
+ pass
+
+ installed = self.base.sack.query().installed()
+ for pkg_spec in pkg_specs:
+ # short-circuit installed check for wildcard matching
+ if '*' in pkg_spec:
+ try:
+ self.base.remove(pkg_spec)
+ except dnf.exceptions.MarkingError as e:
+ is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
+ if is_failure:
+ failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
+ else:
+ response['results'].append(handled_remove_error)
+ continue
+
+ installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
+ sack=self.base.sack).installed().run()
+
+ for pkg in installed_pkg:
+ self.base.remove(str(pkg))
+
+ # Like the dnf CLI we want to allow recursive removal of dependent
+ # packages
+ self.allowerasing = True
+
+ if self.autoremove:
+ self.base.autoremove()
+
+ try:
+ # NOTE for people who go down the rabbit hole of figuring out why
+ # resolve() throws DepsolveError here on dep conflict, but not when
+ # called from the CLI: It's controlled by conf.best. When best is
+ # set, Hawkey will fail the goal, and resolve() in dnf.base.Base
+ # will throw. Otherwise if it's not set, the update (install) will
+ # be (almost silently) removed from the goal, and Hawkey will report
+ # success. Note that in this case, similar to the CLI, skip_broken
+ # does nothing to help here, so we don't take it into account at
+ # all.
+ if not self.base.resolve(allow_erasing=self.allowerasing):
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+
+ response['msg'] = "Nothing to do"
+ self.module.exit_json(**response)
+ else:
+ response['changed'] = True
+
+ # If packages got installed/removed, add them to the results.
+ # We do this early so we can use it for both check_mode and not.
+ if self.download_only:
+ install_action = 'Downloaded'
+ else:
+ install_action = 'Installed'
+ for package in self.base.transaction.install_set:
+ response['results'].append("{0}: {1}".format(install_action, package))
+ for package in self.base.transaction.remove_set:
+ response['results'].append("Removed: {0}".format(package))
+
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+ if self.module.check_mode:
+ response['msg'] = "Check mode: No changes made, but would have if not in check mode"
+ self.module.exit_json(**response)
+
+ try:
+ if self.download_only and self.download_dir and self.base.conf.destdir:
+ dnf.util.ensure_dir(self.base.conf.destdir)
+ self.base.repos.all().pkgdir = self.base.conf.destdir
+
+ self.base.download_packages(self.base.transaction.install_set)
+ except dnf.exceptions.DownloadError as e:
+ self.module.fail_json(
+ msg="Failed to download packages: {0}".format(to_text(e)),
+ results=[],
+ )
+
+ # Validate GPG. This is NOT done in dnf.Base (it's done in the
+ # upstream CLI subclass of dnf.Base)
+ if not self.disable_gpg_check:
+ for package in self.base.transaction.install_set:
+ fail = False
+ gpgres, gpgerr = self.base._sig_check_pkg(package)
+ if gpgres == 0: # validated successfully
+ continue
+ elif gpgres == 1: # validation failed, install cert?
+ try:
+ self.base._get_key_for_package(package)
+ except dnf.exceptions.Error as e:
+ fail = True
+ else: # fatal error
+ fail = True
+
+ if fail:
+ msg = 'Failed to validate GPG signature for {0}: {1}'.format(package, gpgerr)
+ self.module.fail_json(msg)
+
+ if self.download_only:
+ # No further work left to do, and the results were already updated above.
+ # Just return them.
+ self.module.exit_json(**response)
+ else:
+ tid = self.base.do_transaction()
+ if tid is not None:
+ transaction = self.base.history.old([tid])[0]
+ if transaction.return_code:
+ failure_response['failures'].append(transaction.output())
+
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+ self.module.exit_json(**response)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ if to_text("already installed") in to_text(e):
+ response['changed'] = False
+ response['results'].append("Package already installed: {0}".format(to_native(e)))
+ self.module.exit_json(**response)
+ else:
+ failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
+
+ def run(self):
+ """The main function."""
+
+ # Check if autoremove is called correctly
+ if self.autoremove:
+ if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
+ self.module.fail_json(
+ msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
+ results=[],
+ )
+
+ # Check if download_dir is called correctly
+ if self.download_dir:
+ if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
+ self.module.fail_json(
+ msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
+ results=[],
+ )
+
+ if self.update_cache and not self.names and not self.list:
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot, self.sslverify
+ )
+ self.module.exit_json(
+ msg="Cache updated",
+ changed=False,
+ results=[],
+ rc=0
+ )
+
+ # Set state as installed by default
+ # This is not set in AnsibleModule() because the following shouldn't happen
+ # - dnf: autoremove=yes state=installed
+ if self.state is None:
+ self.state = 'installed'
+
+ if self.list:
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot, self.sslverify
+ )
+ self.list_items(self.list)
+ else:
+ # Note: base takes a long time to run so we want to check for failure
+ # before running it.
+ if not self.download_only and not dnf.util.am_i_root():
+ self.module.fail_json(
+ msg="This command has to be run under the root user.",
+ results=[],
+ )
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot, self.sslverify
+ )
+
+ if self.with_modules:
+ self.module_base = dnf.module.module_base.ModuleBase(self.base)
+
+ self.ensure()
+
+
+def main():
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ # Extend yumdnf_argument_spec with dnf-specific features that will never be
+ # backported to yum because yum is now in "maintenance mode" upstream
+ yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
+ yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool')
+
+ module = AnsibleModule(
+ **yumdnf_argument_spec
+ )
+
+ module_implementation = DnfModule(module)
+ try:
+ module_implementation.run()
+ except dnf.exceptions.RepoError as de:
+ module.fail_json(
+ msg="Failed to synchronize repodata: {0}".format(to_native(de)),
+ rc=1,
+ results=[],
+ changed=False
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py
new file mode 100644
index 0000000..87cad52
--- /dev/null
+++ b/lib/ansible/modules/dpkg_selections.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dpkg_selections
+short_description: Dpkg package selections
+description:
+    - Change dpkg package selection state via C(--get-selections) and C(--set-selections).
+version_added: "2.0"
+author:
+- Brian Brazil (@brian-brazil) <brian.brazil@boxever.com>
+options:
+ name:
+ description:
+ - Name of the package.
+ required: true
+ type: str
+ selection:
+ description:
+ - The selection state to set the package to.
+ choices: [ 'install', 'hold', 'deinstall', 'purge' ]
+ required: true
+ type: str
+extends_documentation_fragment:
+- action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ support: full
+ platforms: debian
+notes:
+    - This module won't cause any packages to be installed/removed/purged; use the C(apt) module for that.
+'''
+EXAMPLES = '''
+- name: Prevent python from being upgraded
+ ansible.builtin.dpkg_selections:
+ name: python
+ selection: hold
+
+- name: Allow python to be upgraded
+ ansible.builtin.dpkg_selections:
+ name: python
+ selection: install
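+
+# A sketch of another documented selection state; dpkg acts on it during
+# the next package operation
+- name: Mark python for deinstallation
+  ansible.builtin.dpkg_selections:
+    name: python
+    selection: deinstall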
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ selection=dict(choices=['install', 'hold', 'deinstall', 'purge'], required=True)
+ ),
+ supports_check_mode=True,
+ )
+
+ dpkg = module.get_bin_path('dpkg', True)
+
+ name = module.params['name']
+ selection = module.params['selection']
+
+ # Get current settings.
+ rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
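+    # Expected output sketch: a known package prints a line like
+    # "python3\t\t\thold", so out.split()[1] is the current selection;
+    # an unknown package prints nothing on stdout.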
+ if not out:
+ current = 'not present'
+ else:
+ current = out.split()[1]
+
+ changed = current != selection
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, before=current, after=selection)
+
+ module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
+ module.exit_json(changed=changed, before=current, after=selection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
new file mode 100644
index 0000000..99ffe9f
--- /dev/null
+++ b/lib/ansible/modules/expect.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: expect
+version_added: '2.0'
+short_description: Executes a command and responds to prompts
+description:
+ - The C(expect) module executes a command and responds to prompts.
+ - The given command will be executed on all selected nodes. It will not be
+ processed through the shell, so variables like C($HOME) and operations
+ like C("<"), C(">"), C("|"), and C("&") will not work.
+options:
+ command:
+ description:
+      - The command to run.
+ required: true
+ type: str
+ creates:
+ type: path
+ description:
+      - A filename; when it already exists, this step will B(not) be run.
+ removes:
+ type: path
+ description:
+      - A filename; when it does not exist, this step will B(not) be run.
+ chdir:
+ type: path
+ description:
+ - Change into this directory before running the command.
+ responses:
+ type: dict
+ description:
+ - Mapping of expected string/regex and string to respond with. If the
+ response is a list, successive matches return successive
+ responses. List functionality is new in 2.1.
+ required: true
+ timeout:
+ type: int
+ description:
+ - Amount of time in seconds to wait for the expected strings. Use
+ C(null) to disable timeout.
+ default: 30
+ echo:
+ description:
+ - Whether or not to echo out your response strings.
+ default: false
+ type: bool
+requirements:
+ - python >= 2.6
+ - pexpect >= 3.3
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ support: full
+ platforms: posix
+notes:
+ - If you want to run a command through the shell (say you are using C(<),
+ C(>), C(|), and so on), you must specify a shell in the command such as
+ C(/bin/bash -c "/path/to/something | grep else").
+ - The question, or key, under I(responses) is a python regex match. Case
+ insensitive searches are indicated with a prefix of C(?i).
+  - The C(pexpect) library used by this module operates with a search window
+    of 2000 bytes, and does not use a multiline regex match. To perform a
+    match bound to the start of a line, use a pattern like ``(?m)^pattern``.
+ - By default, if a question is encountered multiple times, its string
+ response will be repeated. If you need different responses for successive
+ question matches, instead of a string response, use a list of strings as
+ the response. The list functionality is new in 2.1.
+ - The M(ansible.builtin.expect) module is designed for simple scenarios.
+ For more complex needs, consider the use of expect code with the M(ansible.builtin.shell)
+ or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation).
+seealso:
+- module: ansible.builtin.script
+- module: ansible.builtin.shell
+author: "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = r'''
+- name: Case insensitive password string match
+ ansible.builtin.expect:
+ command: passwd username
+ responses:
+ (?i)password: "MySekretPa$$word"
+ # you don't want to show passwords in your logs
+ no_log: true
+
+- name: Generic question with multiple different responses
+ ansible.builtin.expect:
+ command: /path/to/custom/command
+ responses:
+ Question:
+ - response1
+ - response2
+ - response3
+'''
+
+import datetime
+import os
+import traceback
+
+PEXPECT_IMP_ERR = None
+try:
+ import pexpect
+ HAS_PEXPECT = True
+except ImportError:
+ PEXPECT_IMP_ERR = traceback.format_exc()
+ HAS_PEXPECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def response_closure(module, question, responses):
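+    # Behavior sketch: given responses ['a', 'b'], the returned callable replies
+    # b'a\n' to the first prompt match and b'b\n' to the second; a third match
+    # fails the module with a "No remaining responses" error.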
+ resp_gen = (b'%s\n' % to_bytes(r).rstrip(b'\n') for r in responses)
+
+ def wrapped(info):
+ try:
+ return next(resp_gen)
+ except StopIteration:
+ module.fail_json(msg="No remaining responses for '%s', "
+ "output was '%s'" %
+ (question,
+ info['child_result_list'][-1]))
+
+ return wrapped
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True),
+ chdir=dict(type='path'),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ responses=dict(type='dict', required=True),
+ timeout=dict(type='int', default=30),
+ echo=dict(type='bool', default=False),
+ )
+ )
+
+ if not HAS_PEXPECT:
+ module.fail_json(msg=missing_required_lib("pexpect"),
+ exception=PEXPECT_IMP_ERR)
+
+ chdir = module.params['chdir']
+ args = module.params['command']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ responses = module.params['responses']
+ timeout = module.params['timeout']
+ echo = module.params['echo']
+
+ events = dict()
+ for key, value in responses.items():
+ if isinstance(value, list):
+ response = response_closure(module, key, value)
+ else:
+ response = b'%s\n' % to_bytes(value).rstrip(b'\n')
+
+ events[to_bytes(key)] = response
+
+ if args.strip() == '':
+ module.fail_json(rc=256, msg="no command given")
+
+ if chdir:
+ chdir = os.path.abspath(chdir)
+ os.chdir(chdir)
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ if os.path.exists(creates):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s exists" % creates,
+ changed=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ if not os.path.exists(removes):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s does not exist" % removes,
+ changed=False,
+ rc=0
+ )
+
+ startd = datetime.datetime.now()
+
+ try:
+ try:
+ # Prefer pexpect.run from pexpect>=4
+ b_out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo,
+ encoding=None)
+ except TypeError:
+ # Use pexpect._run in pexpect>=3.3,<4
+ # pexpect.run doesn't support `echo`
+ # pexpect.runu doesn't support encoding=None
+ b_out, rc = pexpect._run(args, timeout=timeout, withexitstatus=True,
+ events=events, extra_args=None, logfile=None,
+ cwd=chdir, env=None, _spawn=pexpect.spawn,
+ echo=echo)
+
+ except (TypeError, AttributeError) as e:
+ # This should catch all insufficient versions of pexpect
+ # We deem them insufficient for their lack of ability to specify
+ # to not echo responses via the run/runu functions, which would
+ # potentially leak sensitive information
+ module.fail_json(msg='Insufficient version of pexpect installed '
+ '(%s), this module requires pexpect>=3.3. '
+ 'Error was %s' % (pexpect.__version__, to_native(e)))
+ except pexpect.ExceptionPexpect as e:
+ module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if b_out is None:
+ b_out = b''
+
+ result = dict(
+ cmd=args,
+ stdout=to_native(b_out).rstrip('\r\n'),
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ )
+
+ if rc is None:
+ module.fail_json(msg='command exceeded timeout', **result)
+ elif rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/fail.py b/lib/ansible/modules/fail.py
new file mode 100644
index 0000000..8d3fa15
--- /dev/null
+++ b/lib/ansible/modules/fail.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: fail
+short_description: Fail with custom message
+description:
+- This module fails the play for the targeted host with a custom message.
+- It can be useful for bailing out when a certain condition is met using C(when).
+- This module is also supported for Windows targets.
+version_added: "0.8"
+options:
+ msg:
+ description:
+ - The customized message used for failing execution.
+ - If omitted, fail will simply bail out with a generic message.
+ type: str
+ default: Failed as requested from task
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: none
+ connection:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ delegation:
+ details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
+ support: partial
+ platform:
+ platforms: all
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.debug
+- module: ansible.builtin.meta
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Example using fail and when together
+ ansible.builtin.fail:
+ msg: The system may not be provisioned according to the CMDB status.
+ when: cmdb_status != "to-be-staged"
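+
+# A sketch relying on the documented default message when I(msg) is omitted;
+# the condition variable is a placeholder
+- name: Fail with the generic default message
+  ansible.builtin.fail:
+  when: not system_ready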
+'''
diff --git a/lib/ansible/modules/fetch.py b/lib/ansible/modules/fetch.py
new file mode 100644
index 0000000..646f78d
--- /dev/null
+++ b/lib/ansible/modules/fetch.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: fetch
+short_description: Fetch files from remote nodes
+description:
+- This module works like M(ansible.builtin.copy), but in reverse.
+- It is used for fetching files from remote machines and storing them locally in a file tree, organized by hostname.
+- Files that already exist at I(dest) will be overwritten if they are different from the I(src).
+- This module is also supported for Windows targets.
+version_added: '0.2'
+options:
+ src:
+ description:
+ - The file on the remote system to fetch.
+ - This I(must) be a file, not a directory.
+ - Recursive fetching may be supported in a later release.
+ required: yes
+ dest:
+ description:
+ - A directory to save the file into.
+ - For example, if the I(dest) directory is C(/backup) a I(src) file named C(/etc/profile) on host
+ C(host.example.com), would be saved into C(/backup/host.example.com/etc/profile).
+ The host name is based on the inventory name.
+ required: yes
+ fail_on_missing:
+ version_added: '1.1'
+ description:
+ - When set to C(true), the task will fail if the remote file cannot be read for any reason.
+ - Prior to Ansible 2.5, setting this would only fail if the source file was missing.
+ - The default was changed to C(true) in Ansible 2.5.
+ type: bool
+ default: yes
+ validate_checksum:
+ version_added: '1.4'
+ description:
+ - Verify that the source and destination checksums match after the files are fetched.
+ type: bool
+ default: yes
+ flat:
+ version_added: '1.2'
+ description:
+ - Allows you to override the default behavior of appending hostname/path/to/file to the destination.
+ - If C(dest) ends with '/', it will use the basename of the source file, similar to the copy module.
+ - This can be useful if working with a single host, or if retrieving files that are uniquely named per host.
+ - If using multiple hosts with the same filename, the file will be overwritten for each host.
+ type: bool
+ default: no
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix, windows
+ safe_file_operations:
+ support: none
+ vault:
+ support: none
+notes:
+- When running fetch with C(become), the M(ansible.builtin.slurp) module will also be
+ used to fetch the contents of the file for determining the remote
+ checksum. This effectively doubles the transfer size, and
+ depending on the file size can consume all available memory on the
+ remote or local hosts causing a C(MemoryError). Due to this it is
+ advisable to run this module without C(become) whenever possible.
+- Prior to Ansible 2.5 this module would not fail if reading the remote
+ file was impossible unless C(fail_on_missing) was set.
+- In Ansible 2.5 or later, playbook authors are encouraged to use
+  C(failed_when) or C(ignore_errors) to get this ability. They may
+ also explicitly set C(fail_on_missing) to C(false) to get the
+ non-failing behaviour.
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.builtin.slurp
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Store file into /tmp/fetched/host.example.com/tmp/somefile
+ ansible.builtin.fetch:
+ src: /tmp/somefile
+ dest: /tmp/fetched
+
+- name: Specifying a path directly
+ ansible.builtin.fetch:
+ src: /tmp/somefile
+ dest: /tmp/prefix-{{ inventory_hostname }}
+ flat: yes
+
+- name: Specifying a destination path
+ ansible.builtin.fetch:
+ src: /tmp/uniquefile
+ dest: /tmp/special/
+ flat: yes
+
+- name: Storing in a path relative to the playbook
+ ansible.builtin.fetch:
+ src: /tmp/uniquefile
+ dest: special/prefix-{{ inventory_hostname }}
+ flat: yes
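+
+# A sketch of the documented fail_on_missing option; the source path is a placeholder
+- name: Fetch a file that may be absent on some hosts, without failing
+  ansible.builtin.fetch:
+    src: /tmp/optional-file
+    dest: /tmp/fetched
+    fail_on_missing: no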
+'''
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
new file mode 100644
index 0000000..72b510c
--- /dev/null
+++ b/lib/ansible/modules/file.py
@@ -0,0 +1,987 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: file
+version_added: historical
+short_description: Manage files and file properties
+extends_documentation_fragment: [files, action_common_attributes]
+description:
+- Set attributes of files, directories, or symlinks and their targets.
+- Alternatively, remove files, symlinks or directories.
+- Many other modules support the same options as the C(file) module - including M(ansible.builtin.copy),
+ M(ansible.builtin.template), and M(ansible.builtin.assemble).
+- For Windows targets, use the M(ansible.windows.win_file) module instead.
+options:
+ path:
+ description:
+ - Path to the file being managed.
+ type: path
+ required: yes
+ aliases: [ dest, name ]
+ state:
+ description:
+ - If C(absent), directories will be recursively deleted, and files or symlinks will
+ be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
+ under C(path_contents). Note that C(absent) will not cause C(file) to fail if the C(path) does
+ not exist as the state did not change.
+ - If C(directory), all intermediate subdirectories will be created if they
+ do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
+ - If C(file), with no other options, returns the current state of C(path).
+ - If C(file), even with other options (such as C(mode)), the file will be modified if it exists but will NOT be created if it does not exist.
+ Set to C(touch) or use the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want to create the file if it does not exist.
+ - If C(hard), the hard link will be created or changed.
+ - If C(link), the symbolic link will be created or changed.
+ - If C(touch) (new in 1.4), an empty file will be created if the file does not
+ exist, while an existing file or directory will receive updated file access and
+ modification times (similar to the way C(touch) works from the command line).
+ - Default is the current state of the file if it exists, C(directory) if C(recurse=yes), or C(file) otherwise.
+ type: str
+ choices: [ absent, directory, file, hard, link, touch ]
+ src:
+ description:
+ - Path of the file to link to.
+ - This applies only to C(state=link) and C(state=hard).
+ - For C(state=link), this will also accept a non-existing path.
+ - Relative paths are relative to the file being created (C(path)) which is how
+ the Unix command C(ln -s SRC DEST) treats relative paths.
+ type: path
+ recurse:
+ description:
+ - Recursively set the specified file attributes on directory contents.
+ - This applies only when C(state) is set to C(directory).
+ type: bool
+ default: no
+ version_added: '1.1'
+ force:
+ description:
+ - >
+ Force the creation of the symlinks in two cases: the source file does
+ not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
+      C(path) file and create a symlink to the C(src) file in place of it).
+ type: bool
+ default: no
+ follow:
+ description:
+ - This flag indicates that filesystem links, if they exist, should be followed.
+ - I(follow=yes) and I(state=link) can modify I(src) when combined with parameters such as I(mode).
+    - Prior to Ansible 2.5, this was C(false) by default.
+ type: bool
+ default: yes
+ version_added: '1.8'
+ modification_time:
+ description:
+ - This parameter indicates the time the file's modification time should be set to.
+ - Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
+    - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
+ type: str
+ version_added: "2.7"
+ modification_time_format:
+ description:
+ - When used with C(modification_time), indicates the time format that must be used.
+ - Based on default Python format (see time.strftime doc).
+ type: str
+ default: "%Y%m%d%H%M.%S"
+ version_added: '2.7'
+ access_time:
+ description:
+ - This parameter indicates the time the file's access time should be set to.
+ - Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
+    - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
+ type: str
+ version_added: '2.7'
+ access_time_format:
+ description:
+ - When used with C(access_time), indicates the time format that must be used.
+ - Based on the default Python time format (see the C(time.strftime) documentation).
+ type: str
+ default: "%Y%m%d%H%M.%S"
+ version_added: '2.7'
+seealso:
+- module: ansible.builtin.assemble
+- module: ansible.builtin.copy
+- module: ansible.builtin.stat
+- module: ansible.builtin.template
+- module: ansible.windows.win_file
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ details: permissions and ownership will be shown, but file contents on absent/touch will not.
+ support: partial
+ platform:
+ platforms: posix
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Change file ownership, group and permissions
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: '0644'
+
+- name: Give insecure permissions to an existing file
+ ansible.builtin.file:
+ path: /work
+ owner: root
+ group: root
+ mode: '1777'
+
+- name: Create a symbolic link
+ ansible.builtin.file:
+ src: /file/to/link/to
+ dest: /path/to/symlink
+ owner: foo
+ group: foo
+ state: link
+
+- name: Create two hard links
+ ansible.builtin.file:
+ src: '/tmp/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ state: hard
+ loop:
+ - { src: x, dest: y }
+ - { src: z, dest: k }
+
+- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u=rw,g=r,o=r
+
+- name: Touch the same file, but add/remove some permissions
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u+rw,g-wx,o-rwx
+
+- name: Touch the same file again, but do not change times; this makes the task idempotent
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u+rw,g-wx,o-rwx
+ modification_time: preserve
+ access_time: preserve
+
+- name: Create a directory if it does not exist
+ ansible.builtin.file:
+ path: /etc/some_directory
+ state: directory
+ mode: '0755'
+
+- name: Update modification and access time of given file
+ ansible.builtin.file:
+ path: /etc/some_file
+ state: file
+ modification_time: now
+ access_time: now
+
+- name: Set access time based on seconds from epoch value
+ ansible.builtin.file:
+ path: /etc/another_file
+ state: file
+ access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
+
+- name: Recursively change ownership of a directory
+ ansible.builtin.file:
+ path: /etc/foo
+ state: directory
+ recurse: yes
+ owner: foo
+ group: foo
+
+- name: Remove file (delete file)
+ ansible.builtin.file:
+ path: /etc/foo.txt
+ state: absent
+
+- name: Recursively remove directory
+ ansible.builtin.file:
+ path: /etc/foo
+ state: absent
+
+'''
+RETURN = r'''
+dest:
+ description: Destination file/path, equal to the value passed to I(path).
+ returned: state=touch, state=hard, state=link
+ type: str
+ sample: /path/to/file.txt
+path:
+ description: Destination file/path, equal to the value passed to I(path).
+ returned: state=absent, state=directory, state=file
+ type: str
+ sample: /path/to/file.txt
+'''
+
+import errno
+import os
+import shutil
+import sys
+import time
+
+from pwd import getpwnam, getpwuid
+from grp import getgrnam, getgrgid
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+# There will only be a single AnsibleModule object per module
+module = None
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return 'AnsibleModuleError(results={0})'.format(self.results)
+
+
+class ParameterError(AnsibleModuleError):
+ pass
+
+
+class Sentinel(object):
+ def __new__(cls, *args, **kwargs):
+ return cls
+
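+# Note: Sentinel.__new__ returns the class object itself, so Sentinel() is
+# Sentinel. This gives the timestamp handling below a third value, distinct
+# from both None (meaning 'preserve') and a concrete epoch time: identity
+# checks such as `mtime is Sentinel` mean 'now'.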
+
+def _ansible_excepthook(exc_type, exc_value, tb):
+ # Using an exception allows us to catch it if the calling code knows it can recover
+ if issubclass(exc_type, AnsibleModuleError):
+ module.fail_json(**exc_value.results)
+ else:
+ sys.__excepthook__(exc_type, exc_value, tb)
+
+
+def additional_parameter_handling(params):
+ """Additional parameter validation and reformatting"""
+ # When path is a directory, rewrite the pathname to be the file inside of the directory
+ # TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
+ # I think this is where we want to be in the future:
+ # when isdir(path):
+ # if state == absent: Remove the directory
+ # if state == touch: Touch the directory
+ # if state == directory: Assert the directory is the same as the one specified
+ # if state == file: place inside of the directory (use _original_basename)
+ # if state == link: place inside of the directory (use _original_basename. Fallback to src?)
+ # if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
+ if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
+ basename = None
+
+ if params['_original_basename']:
+ basename = params['_original_basename']
+ elif params['src']:
+ basename = os.path.basename(params['src'])
+
+ if basename:
+ params['path'] = os.path.join(params['path'], basename)
+
+ # state should default to file, but since that creates many conflicts,
+ # default state to 'current' when it exists.
+ prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
+
+ if params['state'] is None:
+ if prev_state != 'absent':
+ params['state'] = prev_state
+ elif params['recurse']:
+ params['state'] = 'directory'
+ else:
+ params['state'] = 'file'
+
+ # make sure the target path is a directory when we're doing a recursive operation
+ if params['recurse'] and params['state'] != 'directory':
+ raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
+ "path": params["path"]})
+
+ # Fail if 'src' but no 'state' is specified
+ if params['src'] and params['state'] not in ('link', 'hard'):
+ raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
+ 'path': params['path']})
+
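+# Illustration of the state defaulting above (hypothetical inputs): with no
+# explicit state, an existing path keeps its current state (a regular file
+# yields state='file', a directory yields state='directory'), while an absent
+# path yields state='directory' when recurse=yes and state='file' otherwise.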
+
+def get_state(path):
+ ''' Find out current state '''
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ try:
+ if os.path.lexists(b_path):
+ if os.path.islink(b_path):
+ return 'link'
+ elif os.path.isdir(b_path):
+ return 'directory'
+ elif os.stat(b_path).st_nlink > 1:
+ return 'hard'
+
+ # could be many other things, but defaulting to file
+ return 'file'
+
+ return 'absent'
+ except OSError as e:
+ if e.errno == errno.ENOENT: # It may already have been removed
+ return 'absent'
+ else:
+ raise
+
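+# The resulting mapping, in evaluation order (illustrative):
+#   symlink (even a broken one)  -> 'link'
+#   directory                    -> 'directory'
+#   st_nlink > 1                 -> 'hard'
+#   anything else that exists    -> 'file'
+#   nonexistent path             -> 'absent'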
+
+# This should be moved into the common file utilities
+def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
+ changed = False
+
+ try:
+ for b_root, b_dirs, b_files in os.walk(b_path):
+ for b_fsobj in b_dirs + b_files:
+ b_fsname = os.path.join(b_root, b_fsobj)
+ if not os.path.islink(b_fsname):
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+
+ else:
+ # Change perms on the link
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+
+ if follow:
+ b_fsname = os.path.join(b_root, os.readlink(b_fsname))
+ # The link target could be nonexistent
+ if os.path.exists(b_fsname):
+ if os.path.isdir(b_fsname):
+ # Link is a directory so change perms on the directory's contents
+ changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
+
+ # Change perms on the file pointed to by the link
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+ except RuntimeError as e:
+ # on Python3 "RecursionError" is raised which is derived from "RuntimeError"
+ # TODO once this function is moved into the common file utilities, this should probably raise more general exception
+ raise AnsibleModuleError(
+ results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
+ )
+
+ return changed
+
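+# Note on the walk above: attributes are always set on a symlink itself first;
+# when follow=True they are additionally applied to the link's target, and for
+# directory targets the function recurses into the target's contents as well.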
+
+def initial_diff(path, state, prev_state):
+ diff = {'before': {'path': path},
+ 'after': {'path': path},
+ }
+
+ if prev_state != state:
+ diff['before']['state'] = prev_state
+ diff['after']['state'] = state
+ if state == 'absent' and prev_state == 'directory':
+ walklist = {
+ 'directories': [],
+ 'files': [],
+ }
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ for base_path, sub_folders, files in os.walk(b_path):
+ for folder in sub_folders:
+ folderpath = os.path.join(base_path, folder)
+ walklist['directories'].append(folderpath)
+
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ walklist['files'].append(filepath)
+
+ diff['before']['path_content'] = walklist
+
+ return diff
+
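+# For a directory being removed, the diff produced above looks roughly like
+# this (hypothetical paths, trimmed; os.walk over bytes yields bytes paths):
+#   {'before': {'path': '/tmp/d', 'state': 'directory',
+#               'path_content': {'directories': [b'/tmp/d/sub'],
+#                                'files': [b'/tmp/d/sub/f']}},
+#    'after': {'path': '/tmp/d', 'state': 'absent'}}
+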
+#
+# States
+#
+
+
+def get_timestamp_for_time(formatted_time, time_format):
+ if formatted_time == 'preserve':
+ return None
+ elif formatted_time == 'now':
+ return Sentinel
+ else:
+ try:
+ struct = time.strptime(formatted_time, time_format)
+ struct_time = time.mktime(struct)
+ except (ValueError, OverflowError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
+ % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
+
+ return struct_time
+
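+# For example (hypothetical values, with fmt = '%Y%m%d%H%M.%S', the default):
+#   get_timestamp_for_time('preserve', fmt)         # -> None
+#   get_timestamp_for_time('now', fmt)              # -> Sentinel
+#   get_timestamp_for_time('202001011230.00', fmt)  # -> epoch seconds (float)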
+
+def update_timestamp_for_file(path, mtime, atime, diff=None):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ try:
+ # When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
+ # https://github.com/ansible/ansible/issues/50943
+ if mtime is Sentinel and atime is Sentinel:
+ # It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
+ # not be updated. Just use the current time for the diff values
+ mtime = atime = time.time()
+
+ previous_mtime = os.stat(b_path).st_mtime
+ previous_atime = os.stat(b_path).st_atime
+
+ set_time = None
+ else:
+ # If both parameters are None ('preserve'), there is nothing to do
+ if mtime is None and atime is None:
+ return False
+
+ previous_mtime = os.stat(b_path).st_mtime
+ previous_atime = os.stat(b_path).st_atime
+
+ if mtime is None:
+ mtime = previous_mtime
+ elif mtime is Sentinel:
+ mtime = time.time()
+
+ if atime is None:
+ atime = previous_atime
+ elif atime is Sentinel:
+ atime = time.time()
+
+ # If both timestamps are already ok, nothing to do
+ if mtime == previous_mtime and atime == previous_atime:
+ return False
+
+ set_time = (atime, mtime)
+
+ if not module.check_mode:
+ os.utime(b_path, set_time)
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ if 'after' not in diff:
+ diff['after'] = {}
+ if mtime != previous_mtime:
+ diff['before']['mtime'] = previous_mtime
+ diff['after']['mtime'] = mtime
+ if atime != previous_atime:
+ diff['before']['atime'] = previous_atime
+ diff['after']['atime'] = atime
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
+ % to_native(e, nonstring='simplerepr'), 'path': path})
+ return True
+
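+# Note the argument order above: os.utime() takes (atime, mtime), which is why
+# set_time is built as (atime, mtime). When both values resolve to 'now',
+# set_time stays None and os.utime(path, None) is used, which does not require
+# owning the file (see the issue referenced above).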
+
+def keep_backward_compatibility_on_timestamps(parameter, state):
+ if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
+ return 'preserve'
+ elif state == 'touch' and parameter is None:
+ return 'now'
+ else:
+ return parameter
+
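+# In other words (illustrative):
+#   keep_backward_compatibility_on_timestamps(None, 'file')   # -> 'preserve'
+#   keep_backward_compatibility_on_timestamps(None, 'touch')  # -> 'now'
+#   keep_backward_compatibility_on_timestamps('now', 'file')  # -> 'now'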
+
+def execute_diff_peek(path):
+ """Take a guess as to whether a file is a binary file"""
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ appears_binary = False
+ try:
+ with open(b_path, 'rb') as f:
+ head = f.read(8192)
+ except Exception:
+ # If we can't read the file, we're okay assuming it's text
+ pass
+ else:
+ if b"\x00" in head:
+ appears_binary = True
+
+ return appears_binary
+
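+# This is a heuristic only: a NUL byte within the first 8 KiB marks the file
+# as binary, similar to the convention used by tools such as grep and git.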
+
+def ensure_absent(path):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ result = {}
+
+ if prev_state != 'absent':
+ diff = initial_diff(path, 'absent', prev_state)
+
+ if not module.check_mode:
+ if prev_state == 'directory':
+ try:
+ shutil.rmtree(b_path, ignore_errors=False)
+ except Exception as e:
+ raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
+ else:
+ try:
+ os.unlink(b_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # It may already have been removed
+ raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
+ 'path': path})
+
+ result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
+ else:
+ result.update({'path': path, 'changed': False, 'state': 'absent'})
+
+ return result
+
+
+def execute_touch(path, follow, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ changed = False
+ result = {'dest': path}
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # If the file did not already exist
+ if prev_state == 'absent':
+ # if we are in check mode and the file is absent
+ # we can set the changed status to True and return
+ if module.check_mode:
+ result['changed'] = True
+ return result
+ # Create an empty file
+ try:
+ open(b_path, 'wb').close()
+ changed = True
+ except (OSError, IOError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ # Update the attributes on the file
+ diff = initial_diff(path, 'touch', prev_state)
+ file_args = module.load_file_common_arguments(module.params)
+ try:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except SystemExit as e:
+ if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
+ # We take this to mean that fail_json() was called from
+ # somewhere in basic.py
+ if prev_state == 'absent':
+ # If we just created the file we can safely remove it
+ os.remove(b_path)
+ raise
+
+ result['changed'] = changed
+ result['diff'] = diff
+ return result
+
+
+def ensure_file_attributes(path, follow, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ if prev_state != 'file':
+ if follow and prev_state == 'link':
+ # follow symlink and operate on original
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ prev_state = get_state(b_path)
+ file_args['path'] = path
+
+ if prev_state not in ('file', 'hard'):
+ # file is not absent and any other state is a conflict
+ raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
+ 'path': path, 'state': prev_state})
+
+ diff = initial_diff(path, 'file', prev_state)
+ changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+
+def ensure_directory(path, follow, recurse, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # For followed symlinks, we need to operate on the target of the link
+ if follow and prev_state == 'link':
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ file_args['path'] = path
+ prev_state = get_state(b_path)
+
+ changed = False
+ diff = initial_diff(path, 'directory', prev_state)
+
+ if prev_state == 'absent':
+ # Create directory and assign permissions to it
+ if module.check_mode:
+ return {'path': path, 'changed': True, 'diff': diff}
+ curpath = ''
+
+ try:
+ # Split the path so we can apply filesystem attributes recursively
+ # from the root (/) directory for absolute paths or the base path
+ # of a relative path. We can then walk the appropriate directory
+ # path to apply attributes.
+ # Something like mkdir -p with mode applied to all of the newly created directories
+ for dirname in path.strip('/').split('/'):
+ curpath = '/'.join([curpath, dirname])
+ # Remove leading slash if we're creating a relative path
+ if not os.path.isabs(path):
+ curpath = curpath.lstrip('/')
+ b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
+ if not os.path.exists(b_curpath):
+ try:
+ os.mkdir(b_curpath)
+ changed = True
+ except OSError as ex:
+ # Possibly something else created the dir since the os.path.exists
+ # check above. As long as it's a dir, we don't need to error out.
+ if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
+ raise
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = curpath
+ changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except Exception as e:
+ raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
+ ' %s' % (curpath, to_native(e)),
+ 'path': path})
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+ elif prev_state != 'directory':
+ # We already know prev_state is not 'absent', therefore it exists in some form.
+ raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
+ 'path': path})
+
+ #
+ # previous state == directory
+ #
+
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ if recurse:
+ changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
+
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+
+def ensure_symlink(path, src, follow, force, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+ # src is either the source of a symlink or an informational pass-through of the src used by the template
+ # or copy module; even if this module never uses it, it is needed to key off some things
+ if src is None:
+ if follow and os.path.exists(b_path):
+ # use the current target of the link as the source
+ src = to_native(os.readlink(b_path), errors='strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+
+ if not os.path.islink(b_path) and os.path.isdir(b_path):
+ relpath = path
+ else:
+ b_relpath = os.path.dirname(b_path)
+ relpath = to_native(b_relpath, errors='strict')
+
+ # If src is None that means we are expecting to update an existing link.
+ if src is None:
+ absrc = None
+ else:
+ absrc = os.path.join(relpath, src)
+
+ b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
+ if not force and src is not None and not os.path.exists(b_absrc):
+ raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
+ ' really want to create the link: %s' % absrc,
+ 'path': path, 'src': src})
+
+ if prev_state == 'directory':
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
+ % (prev_state, path),
+ 'path': path})
+ elif os.listdir(b_path):
+ # refuse to replace a directory that has files in it
+ raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
+ ' convert it' % path,
+ 'path': path})
+ elif prev_state in ('file', 'hard') and not force:
+ raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
+ % (prev_state, path),
+ 'path': path})
+
+ diff = initial_diff(path, 'link', prev_state)
+ changed = False
+
+ if prev_state in ('hard', 'file', 'directory', 'absent'):
+ if src is None:
+ raise AnsibleModuleError(results={'msg': 'src is required for creating new symlinks'})
+ changed = True
+ elif prev_state == 'link':
+ if src is not None:
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
+ diff['before']['src'] = to_native(b_old_src, errors='strict')
+ diff['after']['src'] = src
+ changed = True
+ else:
+ raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+
+ if changed and not module.check_mode:
+ if prev_state != 'absent':
+ # try to replace atomically
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
+ try:
+ if prev_state == 'directory':
+ os.rmdir(b_path)
+ os.symlink(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
+ except OSError as e:
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ else:
+ try:
+ os.symlink(b_src, b_path)
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ if module.check_mode and not os.path.exists(b_path):
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+ # Now that we might have created the symlink, get the arguments.
+ # We need to do it now so we can properly follow the symlink if needed
+ # because load_file_common_arguments sets 'path' according to
+ # the value of follow and the symlink existence.
+ file_args = module.load_file_common_arguments(module.params)
+
+ # Whenever we create a link to a nonexistent target we know that the nonexistent target
+ # cannot have any permissions set on it. Skip setting those and emit a warning (the user
+ # can set follow=False to remove the warning)
+ if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
+ module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
+ ' set to False to avoid this.')
+ else:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+
+def ensure_hardlink(path, src, follow, force, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # src is the source of a hardlink. We require it if we are creating a new hardlink.
+ # We require path in the argument_spec so we know it is present at this point.
+ if prev_state != 'hard' and src is None:
+ raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
+
+ # Even if the link already exists, if src was specified it needs to exist.
+ # The inode number will be compared to ensure the link has the correct target.
+ if src is not None and not os.path.exists(b_src):
+ raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
+
+ diff = initial_diff(path, 'hard', prev_state)
+ changed = False
+
+ if prev_state == 'absent':
+ changed = True
+ elif prev_state == 'link':
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
+ diff['before']['src'] = to_native(b_old_src, errors='strict')
+ diff['after']['src'] = src
+ changed = True
+ elif prev_state == 'hard':
+ if src is not None and not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
+ changed = True
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
+ 'dest': path, 'src': src})
+ elif prev_state == 'file':
+ changed = True
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
+ 'dest': path, 'src': src})
+ elif prev_state == 'directory':
+ changed = True
+ if os.path.exists(b_path):
+ if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
+ return {'path': path, 'changed': False}
+ elif not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
+ 'dest': path, 'src': src})
+ else:
+ raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+
+ if changed and not module.check_mode:
+ if prev_state != 'absent':
+ # try to replace atomically
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
+ try:
+ if prev_state == 'directory':
+ if os.path.exists(b_path):
+ try:
+ os.unlink(b_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # It may already have been removed
+ raise
+ os.link(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
+ except OSError as e:
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ else:
+ try:
+ os.link(b_src, b_path)
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ if module.check_mode and not os.path.exists(b_path):
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+
+def check_owner_exists(module, owner):
+ try:
+ uid = int(owner)
+ try:
+ getpwuid(uid).pw_name
+ except KeyError:
+ module.warn('failed to look up user with uid %s. Make sure the user is created before this point in the real play' % uid)
+ except ValueError:
+ try:
+ getpwnam(owner).pw_uid
+ except KeyError:
+ module.warn('failed to look up user %s. Make sure the user is created before this point in the real play' % owner)
+
+
+def check_group_exists(module, group):
+ try:
+ gid = int(group)
+ try:
+ getgrgid(gid).gr_name
+ except KeyError:
+ module.warn('failed to look up group with gid %s. Make sure the group is created before this point in the real play' % gid)
+ except ValueError:
+ try:
+ getgrnam(group).gr_gid
+ except KeyError:
+ module.warn('failed to look up group %s. Make sure the group is created before this point in the real play' % group)
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
+ path=dict(type='path', required=True, aliases=['dest', 'name']),
+ _original_basename=dict(type='str'), # Internal use only, for recursive ops
+ recurse=dict(type='bool', default=False),
+ force=dict(type='bool', default=False), # Note: Should not be in file_common_args in future
+ follow=dict(type='bool', default=True), # Note: Different default than file_common_args
+ _diff_peek=dict(type='bool'), # Internal use only, for internal checks in the action plugins
+ src=dict(type='path'), # Note: Should not be in file_common_args in future
+ modification_time=dict(type='str'),
+ modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
+ access_time=dict(type='str'),
+ access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
+ sys.excepthook = _ansible_excepthook
+ additional_parameter_handling(module.params)
+ params = module.params
+
+ state = params['state']
+ recurse = params['recurse']
+ force = params['force']
+ follow = params['follow']
+ path = params['path']
+ src = params['src']
+
+ if module.check_mode and state != 'absent':
+ file_args = module.load_file_common_arguments(module.params)
+ if file_args['owner']:
+ check_owner_exists(module, file_args['owner'])
+ if file_args['group']:
+ check_group_exists(module, file_args['group'])
+
+ timestamps = {}
+ timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
+ timestamps['modification_time_format'] = params['modification_time_format']
+ timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
+ timestamps['access_time_format'] = params['access_time_format']
+
+ # short-circuit for diff_peek
+ if params['_diff_peek'] is not None:
+ appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
+ module.exit_json(path=path, changed=False, appears_binary=appears_binary)
+
+ if state == 'file':
+ result = ensure_file_attributes(path, follow, timestamps)
+ elif state == 'directory':
+ result = ensure_directory(path, follow, recurse, timestamps)
+ elif state == 'link':
+ result = ensure_symlink(path, src, follow, force, timestamps)
+ elif state == 'hard':
+ result = ensure_hardlink(path, src, follow, force, timestamps)
+ elif state == 'touch':
+ result = execute_touch(path, follow, timestamps)
+ elif state == 'absent':
+ result = ensure_absent(path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
new file mode 100644
index 0000000..b13c841
--- /dev/null
+++ b/lib/ansible/modules/find.py
@@ -0,0 +1,534 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: find
+author: Brian Coca (@bcoca)
+version_added: "2.0"
+short_description: Return a list of files based on specific criteria
+description:
+ - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
+ - For Windows targets, use the M(ansible.windows.win_find) module instead.
+options:
+ age:
+ description:
+ - Select files whose age is equal to or greater than the specified time.
+ - Use a negative age to find files equal to or less than the specified time.
+ - You can choose seconds, minutes, hours, days, or weeks by specifying the
+ first letter of any of those words (e.g., "1w").
+ type: str
+ patterns:
+ default: []
+ description:
+ - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
+ - The patterns restrict the list of files to be returned to those whose basenames match at
+ least one of the patterns specified. Multiple patterns can be specified using a list.
+ - The pattern is matched against the file base name, excluding the directory.
+ - When using regexen, the pattern MUST match the ENTIRE file name, not just parts of it. So
+ if you are looking to match all files ending in .default, you'd need to use C(.*\.default)
+ as a regexp and not just C(\.default).
+ - This parameter expects a list, which can be either comma separated or YAML. If any of the
+ patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
+ in undesirable ways.
+ - Defaults to C(*) when I(use_regex=False), or C(.*) when I(use_regex=True).
+ type: list
+ aliases: [ pattern ]
+ elements: str
+ excludes:
+ description:
+ - One or more (shell or regex) patterns, whose type is controlled by the I(use_regex) option.
+ - Items whose basenames match an I(excludes) pattern are culled from I(patterns) matches.
+ Multiple patterns can be specified using a list.
+ type: list
+ aliases: [ exclude ]
+ version_added: "2.5"
+ elements: str
+ contains:
+ description:
+ - A regular expression or pattern which should be matched against the file content.
+ - Works only when I(file_type) is C(file).
+ type: str
+ read_whole_file:
+ description:
+ - When doing a C(contains) search, determines whether the whole file should be read into
+ memory or if the regex should be applied to the file line-by-line.
+ - Setting this to C(true) can have performance and memory implications for large files.
+ - This uses C(re.search()) instead of C(re.match()).
+ type: bool
+ default: false
+ version_added: "2.11"
+ paths:
+ description:
+ - List of paths of directories to search. All paths must be fully qualified.
+ type: list
+ required: true
+ aliases: [ name, path ]
+ elements: str
+ file_type:
+ description:
+ - Type of file to select.
+ - The 'link' and 'any' choices were added in Ansible 2.3.
+ type: str
+ choices: [ any, directory, file, link ]
+ default: file
+ recurse:
+ description:
+ - If target is a directory, recursively descend into the directory looking for files.
+ type: bool
+ default: no
+ size:
+ description:
+ - Select files whose size is equal to or greater than the specified size.
+ - Use a negative size to find files equal to or less than the specified size.
+ - Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
+ bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
+ - Size is not evaluated for directories.
+ type: str
+ age_stamp:
+ description:
+ - Choose the file property against which we compare age.
+ type: str
+ choices: [ atime, ctime, mtime ]
+ default: mtime
+ hidden:
+ description:
+ - Set this to C(true) to include hidden files, otherwise they will be ignored.
+ type: bool
+ default: no
+ follow:
+ description:
+ - Set this to C(true) to follow symlinks in path for systems with Python 2.6+.
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Set this to C(true) to retrieve a file's SHA1 checksum.
+ type: bool
+ default: no
+ use_regex:
+ description:
+ - If C(false), the patterns are file globs (shell).
+ - If C(true), they are python regexes.
+ type: bool
+ default: no
+ depth:
+ description:
+ - Set the maximum number of levels to descend into.
+ - Setting recurse to C(false) will override this value, effectively limiting the depth to 1.
+ - Default is unlimited depth.
+ type: int
+ version_added: "2.6"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ details: since this action does not modify the target, it just executes normally during check mode
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.windows.win_find
+'''
+
+
+EXAMPLES = r'''
+- name: Recursively find /tmp files older than 2 days
+ ansible.builtin.find:
+ paths: /tmp
+ age: 2d
+ recurse: yes
+
+- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
+ ansible.builtin.find:
+ paths: /tmp
+ age: 4w
+ size: 1m
+ recurse: yes
+
+- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
+ ansible.builtin.find:
+ paths: /var/tmp
+ age: 3600
+ age_stamp: atime
+ recurse: yes
+
+- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
+ ansible.builtin.find:
+ paths: /var/log
+ patterns: '*.old,*.log.gz'
+ size: 10m
+
+# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
+- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
+ ansible.builtin.find:
+ paths: /var/log
+ patterns: "^.*?\\.(?:old|log\\.gz)$"
+ size: 10m
+ use_regex: yes
+
+- name: Find /var/log all directories, exclude nginx and mysql
+ ansible.builtin.find:
+ paths: /var/log
+ recurse: no
+ file_type: directory
+ excludes: 'nginx,mysql'
+
+# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
+- name: Use a single pattern that contains a comma formatted as a list
+ ansible.builtin.find:
+ paths: /var/log
+ file_type: file
+ use_regex: yes
+ patterns: ['^_[0-9]{2,4}_.*.log$']
+
+- name: Use multiple patterns that contain a comma formatted as a YAML list
+ ansible.builtin.find:
+ paths: /var/log
+ file_type: file
+ use_regex: yes
+ patterns:
+ - '^_[0-9]{2,4}_.*.log$'
+ - '^[a-z]{1,5}_.*log$'
+
+'''
+
+RETURN = r'''
+files:
+ description: All matches found with the specified criteria (see stat module for full output of each dictionary)
+ returned: success
+ type: list
+ sample: [
+ { path: "/var/tmp/test1",
+ mode: "0644",
+ "...": "...",
+ checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
+ },
+ { path: "/var/tmp/test2",
+ "...": "..."
+ },
+ ]
+matched:
+ description: Number of matches
+ returned: success
+ type: int
+ sample: 14
+examined:
+ description: Number of filesystem objects looked at
+ returned: success
+ type: int
+ sample: 34
+skipped_paths:
+ description: skipped paths and reasons they were skipped
+ returned: success
+ type: dict
+ sample: {"/laskdfj": "'/laskdfj' is not a directory"}
+ version_added: '2.12'
+'''
+
+import fnmatch
+import grp
+import os
+import pwd
+import re
+import stat
+import time
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def pfilter(f, patterns=None, excludes=None, use_regex=False):
+ '''filter using glob patterns'''
+ if not patterns and not excludes:
+ return True
+
+ if use_regex:
+ if patterns and not excludes:
+ for p in patterns:
+ r = re.compile(p)
+ if r.match(f):
+ return True
+
+ elif patterns and excludes:
+ for p in patterns:
+ r = re.compile(p)
+ if r.match(f):
+ for e in excludes:
+ r = re.compile(e)
+ if r.match(f):
+ return False
+ return True
+
+ else:
+ if patterns and not excludes:
+ for p in patterns:
+ if fnmatch.fnmatch(f, p):
+ return True
+
+ elif patterns and excludes:
+ for p in patterns:
+ if fnmatch.fnmatch(f, p):
+ for e in excludes:
+ if fnmatch.fnmatch(f, e):
+ return False
+ return True
+
+ return False
+
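+# Examples (hypothetical): with use_regex=False, pfilter('a.log', ['*.log'])
+# returns True (glob match). With use_regex=True, re.match() anchors at the
+# start of the basename, so pfilter('a.log', [r'\.log'], use_regex=True) is
+# False while pfilter('a.log', [r'.*\.log'], use_regex=True) is True.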
+
+def agefilter(st, now, age, timestamp):
+ '''filter files older than age'''
+ if age is None:
+ return True
+ elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age):
+ return True
+ elif age < 0 and now - getattr(st, "st_%s" % timestamp) <= abs(age):
+ return True
+ return False
+
+
+def sizefilter(st, size):
+ '''filter files greater than size'''
+ if size is None:
+ return True
+ elif size >= 0 and st.st_size >= abs(size):
+ return True
+ elif size < 0 and st.st_size <= abs(size):
+ return True
+ return False
+
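+# Sign convention for both filters above (illustrative): age=86400 selects
+# entries at least one day old and age=-86400 selects entries no older than
+# one day; likewise size=1048576 means 'at least 1 MiB' and size=-1048576
+# means 'at most 1 MiB'.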
+
+def contentfilter(fsname, pattern, read_whole_file=False):
+ """
+ Filter files which contain the given expression
+ :arg fsname: Filename to scan for lines matching a pattern
+ :arg pattern: Pattern to look for inside of line
+ :arg read_whole_file: If true, the whole file is read into memory before the regex is applied against it. Otherwise, the regex is applied line-by-line.
+ :rtype: bool
+ :returns: True if one of the lines in fsname matches the pattern. Otherwise False
+ """
+ if pattern is None:
+ return True
+
+ prog = re.compile(pattern)
+
+ try:
+ with open(fsname) as f:
+ if read_whole_file:
+ return bool(prog.search(f.read()))
+
+ for line in f:
+ if prog.match(line):
+ return True
+
+ except Exception:
+ pass
+
+ return False
+
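+# Note the differing semantics above: line-by-line scanning uses re.match()
+# (anchored at the start of each line), while read_whole_file=True applies
+# re.search() to the whole buffer, as documented for the read_whole_file
+# option.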
+
+def statinfo(st):
+ pw_name = ""
+ gr_name = ""
+
+ try: # user data
+ pw_name = pwd.getpwuid(st.st_uid).pw_name
+ except Exception:
+ pass
+
+ try: # group data
+ gr_name = grp.getgrgid(st.st_gid).gr_name
+ except Exception:
+ pass
+
+ return {
+ 'mode': "%04o" % stat.S_IMODE(st.st_mode),
+ 'isdir': stat.S_ISDIR(st.st_mode),
+ 'ischr': stat.S_ISCHR(st.st_mode),
+ 'isblk': stat.S_ISBLK(st.st_mode),
+ 'isreg': stat.S_ISREG(st.st_mode),
+ 'isfifo': stat.S_ISFIFO(st.st_mode),
+ 'islnk': stat.S_ISLNK(st.st_mode),
+ 'issock': stat.S_ISSOCK(st.st_mode),
+ 'uid': st.st_uid,
+ 'gid': st.st_gid,
+ 'size': st.st_size,
+ 'inode': st.st_ino,
+ 'dev': st.st_dev,
+ 'nlink': st.st_nlink,
+ 'atime': st.st_atime,
+ 'mtime': st.st_mtime,
+ 'ctime': st.st_ctime,
+ 'gr_name': gr_name,
+ 'pw_name': pw_name,
+ 'wusr': bool(st.st_mode & stat.S_IWUSR),
+ 'rusr': bool(st.st_mode & stat.S_IRUSR),
+ 'xusr': bool(st.st_mode & stat.S_IXUSR),
+ 'wgrp': bool(st.st_mode & stat.S_IWGRP),
+ 'rgrp': bool(st.st_mode & stat.S_IRGRP),
+ 'xgrp': bool(st.st_mode & stat.S_IXGRP),
+ 'woth': bool(st.st_mode & stat.S_IWOTH),
+ 'roth': bool(st.st_mode & stat.S_IROTH),
+ 'xoth': bool(st.st_mode & stat.S_IXOTH),
+ 'isuid': bool(st.st_mode & stat.S_ISUID),
+ 'isgid': bool(st.st_mode & stat.S_ISGID),
+ }
+
+
+def handle_walk_errors(e):
+ raise e
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
+ patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'),
+ excludes=dict(type='list', aliases=['exclude'], elements='str'),
+ contains=dict(type='str'),
+ read_whole_file=dict(type='bool', default=False),
+ file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
+ age=dict(type='str'),
+ age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
+ size=dict(type='str'),
+ recurse=dict(type='bool', default=False),
+ hidden=dict(type='bool', default=False),
+ follow=dict(type='bool', default=False),
+ get_checksum=dict(type='bool', default=False),
+ use_regex=dict(type='bool', default=False),
+ depth=dict(type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+
+ # Set the default match pattern to either a match-all glob or
+ # regex depending on use_regex being set. This makes sure that if you
+ # set excludes: without a pattern, pfilter gets something it can
+ # handle.
+ if not params['patterns']:
+ if params['use_regex']:
+ params['patterns'] = ['.*']
+ else:
+ params['patterns'] = ['*']
+
+ filelist = []
+ skipped = {}
+
+ if params['age'] is None:
+ age = None
+ else:
+ # convert age to seconds:
+ m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
+ seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
+ if m:
+ age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
+ else:
+ module.fail_json(age=params['age'], msg="failed to process age")
+
+ if params['size'] is None:
+ size = None
+ else:
+ # convert size to bytes:
+ m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
+ bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
+ if m:
+ size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
+ else:
+ module.fail_json(size=params['size'], msg="failed to process size")
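+ # e.g. (hypothetical) age='2d' becomes 172800 seconds and size='-1m' becomes
+ # -1048576 bytes, i.e. 'modified at least two days ago' and '1 MiB or
+ # smaller' under the filters' sign conventions.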
+
+ now = time.time()
+ msg = 'All paths examined'
+ looked = 0
+ has_warnings = False
+ for npath in params['paths']:
+ npath = os.path.expanduser(os.path.expandvars(npath))
+ try:
+ if not os.path.isdir(npath):
+ raise Exception("'%s' is not a directory" % to_native(npath))
+
+ for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow']):
+ looked = looked + len(files) + len(dirs)
+ for fsobj in (files + dirs):
+ fsname = os.path.normpath(os.path.join(root, fsobj))
+ if params['depth']:
+ wpath = npath.rstrip(os.path.sep) + os.path.sep
+ depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
+ if depth > params['depth']:
+ # Empty the list used by os.walk to avoid traversing deeper unnecessarily
+ del dirs[:]
+ continue
+ if os.path.basename(fsname).startswith('.') and not params['hidden']:
+ continue
+
+ try:
+ st = os.lstat(fsname)
+ except (IOError, OSError) as e:
+ module.warn("Skipped entry '%s' due to this access issue: %s\n" % (fsname, to_text(e)))
+ skipped[fsname] = to_text(e)
+ has_warnings = True
+ continue
+
+ r = {'path': fsname}
+ if params['file_type'] == 'any':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ if stat.S_ISREG(st.st_mode) and params['get_checksum']:
+ r['checksum'] = module.sha1(fsname)
+
+ if stat.S_ISREG(st.st_mode):
+ if sizefilter(st, size):
+ filelist.append(r)
+ else:
+ filelist.append(r)
+
+ elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ filelist.append(r)
+
+ elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
+ agefilter(st, now, age, params['age_stamp']) and \
+ sizefilter(st, size) and contentfilter(fsname, params['contains'], params['read_whole_file']):
+
+ r.update(statinfo(st))
+ if params['get_checksum']:
+ r['checksum'] = module.sha1(fsname)
+ filelist.append(r)
+
+ elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ filelist.append(r)
+
+ if not params['recurse']:
+ break
+ except Exception as e:
+ skipped[npath] = to_text(e)
+ module.warn("Skipped '%s' path due to this access issue: %s\n" % (to_text(npath), skipped[npath]))
+ has_warnings = True
+
+ if has_warnings:
+ msg = 'Not all paths examined, check warnings for details'
+ matched = len(filelist)
+ module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked, skipped_paths=skipped)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py
new file mode 100644
index 0000000..b099cd8
--- /dev/null
+++ b/lib/ansible/modules/gather_facts.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gather_facts
+version_added: 2.8
+short_description: Gathers facts about remote hosts
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+ - action_common_attributes.flow
+description:
+ - This module takes care of executing the R(configured facts modules,FACTS_MODULES); the default is to use the M(ansible.builtin.setup) module.
+ - This module is automatically called by playbooks to gather useful variables about remote hosts that can be used in playbooks.
+ - It can also be executed directly by C(/usr/bin/ansible) to check what variables are available to a host.
+ - Ansible provides many I(facts) about the system, automatically.
+options:
+ parallel:
+ description:
+ - A toggle that controls if the fact modules are executed in parallel or serially and in order.
+ This can guarantee the merge order of module facts at the expense of performance.
+ - By default it will be true if more than one fact module is used.
+ type: bool
+attributes:
+ action:
+ support: full
+ async:
+ details: multiple modules can be executed in parallel or serially, but the action itself will not be async
+ support: partial
+ bypass_host_loop:
+ support: none
+ check_mode:
+ details: since this action should just query the target system info, it always runs in check mode
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ details: The action plugin should be able to select the specific platform modules automatically or can be configured manually
+ platforms: all
+notes:
+ - This is mostly a wrapper around other fact gathering modules.
+ - Options passed into this action must be supported by all the underlying fact modules configured.
+ - Facts returned by each module will be merged; conflicts will favor the 'last merged'.
+ Order is not guaranteed when doing parallel gathering on multiple modules.
+author:
+ - "Ansible Core Team"
+'''
+
+RETURN = """
+# depends on the fact module called
+"""
+
+EXAMPLES = """
+# Display facts from all hosts and store them indexed by hostname at /tmp/facts.
+# ansible all -m ansible.builtin.gather_facts --tree /tmp/facts
+"""
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
new file mode 100644
index 0000000..eec2424
--- /dev/null
+++ b/lib/ansible/modules/get_url.py
@@ -0,0 +1,706 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: get_url
+short_description: Downloads files from HTTP, HTTPS, or FTP to node
+description:
+ - Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
+ server I(must) have direct access to the remote resource.
+ - By default, if an environment variable C(<protocol>_proxy) is set on
+ the target host, requests will be sent through that proxy. This
+ behaviour can be overridden by setting a variable for this task
+ (see R(setting the environment,playbooks_environment)),
+ or by using the use_proxy option.
+ - HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
+ your proxy environment for both protocols is correct.
+ - From Ansible 2.4, when run with C(--check), it will do a HEAD request to validate the URL but
+ will not download the entire file or verify it against hashes, and will report an incorrect changed status.
+ - For Windows targets, use the M(ansible.windows.win_get_url) module instead.
+version_added: '0.6'
+options:
+ ciphers:
+ description:
+ - SSL/TLS ciphers to use for the request
+ - 'When a list is provided, all ciphers are joined in order with C(:)'
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+ - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions
+ type: list
+ elements: str
+ version_added: '2.14'
+ decompress:
+ description:
+ - Whether to attempt to decompress gzip content-encoded responses
+ type: bool
+ default: true
+ version_added: '2.14'
+ url:
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ type: str
+ required: true
+ dest:
+ description:
+ - Absolute path of where to download the file to.
+ - If C(dest) is a directory, either the server provided filename or, if
+ none provided, the base name of the URL on the remote server will be
+ used. If a directory, C(force) has no effect.
+ - If C(dest) is a directory, the file will always be downloaded
+ (regardless of the C(force) and C(checksum) option), but
+ replaced only if the contents changed.
+ type: path
+ required: true
+ tmp_dest:
+ description:
+ - Absolute path of where temporary file is downloaded to.
+ - When run on Ansible 2.5 or greater, path defaults to Ansible's C(remote_tmp) setting
+ - When run on Ansible prior to 2.5, it defaults to C(TMPDIR), C(TEMP) or C(TMP) env variables or a platform specific value.
+ - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir)
+ type: path
+ version_added: '2.1'
+ force:
+ description:
+ - If C(true) and C(dest) is not a directory, will download the file every
+ time and replace the file if the contents change. If C(false), the file
+ will only be downloaded if the destination does not exist. Generally
+ should be C(true) only for small local files.
+ - Prior to 0.6, this module behaved as if C(true) was the default.
+ type: bool
+ default: no
+ version_added: '0.7'
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '2.1'
+ checksum:
+ description:
+ - 'If a checksum is passed to this parameter, the digest of the
+ destination file will be calculated after it is downloaded to ensure
+ its integrity and verify that the transfer completed successfully.
+ Format: <algorithm>:<checksum|url>, e.g. checksum="sha256:D98291AC[...]B6DC7B97",
+ checksum="sha256:http://example.com/path/sha256sum.txt"'
+ - If portability is a concern, only the sha1 algorithm is available
+ on all platforms and Python versions.
+ - The third party hashlib library can be installed for access to additional algorithms.
+ - Additionally, if a checksum is passed to this parameter, and the file exists under
+ the C(dest) location, the I(destination_checksum) would be calculated, and if
+ checksum equals I(destination_checksum), the file download would be skipped
+ (unless C(force) is true). If the checksum does not equal I(destination_checksum),
+ the destination file is deleted.
+ type: str
+ default: ''
+ version_added: "2.0"
+ use_proxy:
+ description:
+ - If C(false), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: yes
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ timeout:
+ description:
+ - Timeout in seconds for URL request.
+ type: int
+ default: 10
+ version_added: '1.8'
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ - The hash/dict format was added in Ansible 2.6.
+ - Previous versions used a C("key:value,key:value") string format.
+ - The C("key:value,key:value") string format is deprecated and has been removed in version 2.10.
+ type: dict
+ version_added: '2.0'
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ - Since version 2.8 you can also use the C(username) alias for this option.
+ type: str
+ aliases: ['username']
+ version_added: '1.6'
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ - Since version 2.8 you can also use the 'password' alias for this option.
+ type: str
+ aliases: ['password']
+ version_added: '1.6'
+ force_basic_auth:
+ description:
+ - Force the sending of the Basic authentication header upon initial request.
+ - httplib2, the library used by the uri module, only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail.
+ type: bool
+ default: no
+ version_added: '2.0'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, C(client_key) is not required.
+ type: path
+ version_added: '2.4'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If C(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '2.4'
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+ unredirected_headers:
+ description:
+ - A list of header names that will not be sent on subsequent redirected requests. This list is case
+ insensitive. By default all headers will be sent on redirected requests. In some cases it may be beneficial to list
+ headers such as C(Authorization) here to avoid potential credential exposure.
+ default: []
+ type: list
+ elements: str
+ version_added: '2.12'
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+ C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is I(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
+ use_netrc:
+ description:
+ - Determines whether to use credentials from the ``~/.netrc`` file.
+ - By default, .netrc is used with Basic authentication headers.
+ - When set to C(false), .netrc credentials are ignored.
+ type: bool
+ default: true
+ version_added: '2.14'
+# informational: requirements for nodes
+extends_documentation_fragment:
+ - files
+ - action_common_attributes
+attributes:
+ check_mode:
+ details: the changed status will reflect comparison to an empty source file
+ support: partial
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - For Windows targets, use the M(ansible.windows.win_get_url) module instead.
+seealso:
+- module: ansible.builtin.uri
+- module: ansible.windows.win_get_url
+author:
+- Jan-Piet Mens (@jpmens)
+'''
+
+EXAMPLES = r'''
+- name: Download foo.conf
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ mode: '0440'
+
+- name: Download file and force basic auth
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ force_basic_auth: yes
+
+- name: Download file with custom HTTP headers
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ headers:
+ key1: one
+ key2: two
+
+- name: Download file with check (sha256)
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
+
+- name: Download file with check (md5)
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: md5:66dffb5228a211e61d6d7ef4a86f5758
+
+- name: Download file with checksum url (sha256)
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:http://example.com/path/sha256sum.txt
+
+- name: Download file from a file path
+ ansible.builtin.get_url:
+ url: file:///tmp/afile.txt
+ dest: /tmp/afilecopy.txt
+
+- name: < Fetch file that requires authentication.
+ username/password only available since 2.8, in older versions you need to use url_username/url_password
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ username: bar
+ password: '{{ mysecret }}'
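+
+# A minimal sketch (hypothetical token variable) of the unredirected_headers
+# option: the Authorization header is sent initially but dropped on redirects.
+- name: Download file without forwarding Authorization across redirects
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ headers:
+ Authorization: "Bearer {{ mytoken }}"
+ unredirected_headers:
+ - Authorization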
+'''
+
+RETURN = r'''
+backup_file:
+ description: name of backup file created after download
+ returned: changed and if backup=yes
+ type: str
+ sample: /path/to/file.txt.2015-02-12@22:09~
+checksum_dest:
+ description: sha1 checksum of the file after copy
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+checksum_src:
+ description: sha1 checksum of the file
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+elapsed:
+ description: The number of seconds that elapsed while performing the download
+ returned: always
+ type: int
+ sample: 23
+gid:
+ description: group id of the file
+ returned: success
+ type: int
+ sample: 100
+group:
+ description: group of the file
+ returned: success
+ type: str
+ sample: "httpd"
+md5sum:
+ description: md5 checksum of the file after download
+ returned: when supported
+ type: str
+ sample: "2a5aeecc61dc98c4d780b14b330e3282"
+mode:
+ description: permissions of the target
+ returned: success
+ type: str
+ sample: "0644"
+msg:
+ description: the HTTP message from the request
+ returned: always
+ type: str
+ sample: OK (unknown bytes)
+owner:
+ description: owner of the file
+ returned: success
+ type: str
+ sample: httpd
+secontext:
+ description: the SELinux security context of the file
+ returned: success
+ type: str
+ sample: unconfined_u:object_r:user_tmp_t:s0
+size:
+ description: size of the target
+ returned: success
+ type: int
+ sample: 1220
+src:
+ description: source file used after download
+ returned: always
+ type: str
+ sample: /tmp/tmpAdFLdV
+state:
+ description: state of the target
+ returned: success
+ type: str
+ sample: file
+status_code:
+ description: the HTTP status code from the request
+ returned: always
+ type: int
+ sample: 200
+uid:
+ description: owner id of the file, after execution
+ returned: success
+ type: int
+ sample: 100
+url:
+ description: the actual URL used for the request
+ returned: always
+ type: str
+ sample: https://www.ansible.com/
+'''
+
+import datetime
+import os
+import re
+import shutil
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+# ==============================================================
+# url handling
+
+
+def url_filename(url):
+ fn = os.path.basename(urlsplit(url)[2])
+ if fn == '':
+ return 'index.html'
+ return fn
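+# e.g. (hypothetical URLs): url_filename('http://example.com/a/file.conf?x=1')
+# returns 'file.conf'; a bare host like 'http://example.com/' returns 'index.html'.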
+
+
+def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest='', method='GET', unredirected_headers=None,
+ decompress=True, ciphers=None, use_netrc=True):
+ """
+ Download data from the url and store in a temporary file.
+
+ Return (tempfile, info about the request)
+ """
+
+ start = datetime.datetime.utcnow()
+ rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method,
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
+ elapsed = (datetime.datetime.utcnow() - start).seconds
+
+ if info['status'] == 304:
+ module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed)
+
+ # Exceptions in fetch_url may result in a status of -1; this ensures a proper error reaches the user in all cases
+ if info['status'] == -1:
+ module.fail_json(msg=info['msg'], url=url, dest=dest, elapsed=elapsed)
+
+ if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
+ module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest, elapsed=elapsed)
+
+ # create a temporary file and copy content to do checksum-based replacement
+ if tmp_dest:
+ # tmp_dest should be an existing dir
+ tmp_dest_is_dir = os.path.isdir(tmp_dest)
+ if not tmp_dest_is_dir:
+ if os.path.exists(tmp_dest):
+ module.fail_json(msg="%s is a file but should be a directory." % tmp_dest, elapsed=elapsed)
+ else:
+ module.fail_json(msg="%s directory does not exist." % tmp_dest, elapsed=elapsed)
+ else:
+ tmp_dest = module.tmpdir
+
+ fd, tempname = tempfile.mkstemp(dir=tmp_dest)
+
+ f = os.fdopen(fd, 'wb')
+ try:
+ shutil.copyfileobj(rsp, f)
+ except Exception as e:
+ os.remove(tempname)
+ module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed, exception=traceback.format_exc())
+ f.close()
+ rsp.close()
+ return tempname, info
+
+
+def extract_filename_from_headers(headers):
+ """
+ Extracts a filename from the given dict of HTTP headers.
+
+ Looks for the content-disposition header and applies a regex.
+ Returns the filename if successful, else None."""
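+ # e.g. a hypothetical header 'attachment; filename="report.pdf"' yields 'report.pdf'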
+ cont_disp_regex = 'attachment; ?filename="?([^"]+)'
+ res = None
+
+ if 'content-disposition' in headers:
+ cont_disp = headers['content-disposition']
+ match = re.match(cont_disp_regex, cont_disp)
+ if match:
+ res = match.group(1)
+ # Try preventing any funny business.
+ res = os.path.basename(res)
+
+ return res
+
+
+def is_url(checksum):
+ """
+ Returns True if checksum value has supported URL scheme, else False."""
+ supported_schemes = ('http', 'https', 'ftp', 'file')
+
+ return urlsplit(checksum).scheme in supported_schemes
+
+
+# ==============================================================
+# main
+
+def main():
+ argument_spec = url_argument_spec()
+
+ # setup aliases
+ argument_spec['url_username']['aliases'] = ['username']
+ argument_spec['url_password']['aliases'] = ['password']
+
+ argument_spec.update(
+ url=dict(type='str', required=True),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ checksum=dict(type='str', default=''),
+ timeout=dict(type='int', default=10),
+ headers=dict(type='dict'),
+ tmp_dest=dict(type='path'),
+ unredirected_headers=dict(type='list', elements='str', default=[]),
+ decompress=dict(type='bool', default=True),
+ ciphers=dict(type='list', elements='str'),
+ use_netrc=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ url = module.params['url']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ force = module.params['force']
+ checksum = module.params['checksum']
+ use_proxy = module.params['use_proxy']
+ timeout = module.params['timeout']
+ headers = module.params['headers']
+ tmp_dest = module.params['tmp_dest']
+ unredirected_headers = module.params['unredirected_headers']
+ decompress = module.params['decompress']
+ ciphers = module.params['ciphers']
+ use_netrc = module.params['use_netrc']
+
+ result = dict(
+ changed=False,
+ checksum_dest=None,
+ checksum_src=None,
+ dest=dest,
+ elapsed=0,
+ url=url,
+ )
+
+ dest_is_dir = os.path.isdir(dest)
+ last_mod_time = None
+
+ # checksum specified, parse for algorithm and checksum
+ if checksum:
+ try:
+ algorithm, checksum = checksum.split(':', 1)
+ except ValueError:
+ module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
+
+ if is_url(checksum):
+ checksum_url = checksum
+ # download checksum file to checksum_tmpsrc
+ checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
+ unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
+ with open(checksum_tmpsrc) as f:
+ lines = [line.rstrip('\n') for line in f]
+ os.remove(checksum_tmpsrc)
+ checksum_map = []
+ filename = url_filename(url)
+ if len(lines) == 1 and len(lines[0].split()) == 1:
+ # Only a single line with a single string
+ # treat it as a checksum only file
+ checksum_map.append((lines[0], filename))
+ else:
+ # The assumption here is the file is in the format of
+ # checksum filename
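+ # e.g. hypothetical sha256sum.txt lines this parser accepts:
+ # <hex>  file.conf      (text mode: checksum, space, space, name)
+ # <hex> *file.conf      (binary mode: checksum, space, '*', name)
+ # <hex>  ./file.conf    (leading './' is stripped below)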
+ for line in lines:
+ # Split by one whitespace to keep the leading type char ' ' (whitespace) for text and '*' for binary
+ parts = line.split(" ", 1)
+ if len(parts) == 2:
+ # Remove the leading type char; we expect ' ' (text mode) or '*' (binary mode)
+ if parts[1].startswith((" ", "*",)):
+ parts[1] = parts[1][1:]
+
+ # Append checksum and path without potential leading './'
+ checksum_map.append((parts[0], parts[1].lstrip("./")))
+
+ # Look through each line in the checksum file for a hash corresponding to
+ # the filename in the url, returning the first hash that is found.
+ for cksum in (s for (s, f) in checksum_map if f == filename):
+ checksum = cksum
+ break
+ else:
+ checksum = None
+
+ if checksum is None:
+ module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
+ # Remove any non-alphanumeric characters, including the infamous
+ # Unicode zero-width space
+ checksum = re.sub(r'\W+', '', checksum).lower()
+ # Ensure the checksum portion is a hexdigest
+ try:
+ int(checksum, 16)
+ except ValueError:
+ module.fail_json(msg='The checksum format is invalid', **result)
+
+ if not dest_is_dir and os.path.exists(dest):
+ checksum_mismatch = False
+
+ # If the download is not forced and there is a checksum, allow
+ # checksum match to skip the download.
+ if not force and checksum != '':
+ destination_checksum = module.digest_from_file(dest, algorithm)
+
+ if checksum != destination_checksum:
+ checksum_mismatch = True
+
+ # Not forcing redownload, unless checksum does not match
+ if not force and checksum and not checksum_mismatch:
+ # allow file attribute changes
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, False)
+ if result['changed']:
+ module.exit_json(msg="file already exists but file attributes changed", **result)
+ module.exit_json(msg="file already exists", **result)
+
+ # If the file already exists, prepare the last modified time for the
+ # request.
+ mtime = os.path.getmtime(dest)
+ last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
+
+ # If the checksum does not match we have to force the download
+ # because last_mod_time may be newer than on remote
+ if checksum_mismatch:
+ force = True
+
+ # download to tmpsrc
+ start = datetime.datetime.utcnow()
+ method = 'HEAD' if module.check_mode else 'GET'
+ tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method,
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ result['src'] = tmpsrc
+
+ # Now the request has completed, we can finally generate the final
+ # destination file name from the info dict.
+
+ if dest_is_dir:
+ filename = extract_filename_from_headers(info)
+ if not filename:
+ # Fall back to extracting the filename from the URL.
+ # Pluck the URL from the info, since a redirect could have changed
+ # it.
+ filename = url_filename(info['url'])
+ dest = os.path.join(dest, filename)
+ result['dest'] = dest
+
+ # raise an error if there is no tmpsrc file
+ if not os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
+ if not os.access(tmpsrc, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
+ result['checksum_src'] = module.sha1(tmpsrc)
+
+ # check if there is no dest file
+ if os.path.exists(dest):
+ # raise an error if copy has no permission on dest
+ if not os.access(dest, os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not writable" % (dest), **result)
+ if not os.access(dest, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not readable" % (dest), **result)
+ result['checksum_dest'] = module.sha1(dest)
+ else:
+ if not os.path.exists(os.path.dirname(dest)):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
+ if not os.access(os.path.dirname(dest), os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
+
+ if module.check_mode:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ result['changed'] = ('checksum_dest' not in result or
+ result['checksum_src'] != result['checksum_dest'])
+ module.exit_json(msg=info.get('msg', ''), **result)
+
+ backup_file = None
+ if result['checksum_src'] != result['checksum_dest']:
+ try:
+ if backup:
+ if os.path.exists(dest):
+ backup_file = module.backup_local(dest)
+ module.atomic_move(tmpsrc, dest, unsafe_writes=module.params['unsafe_writes'])
+ except Exception as e:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
+ exception=traceback.format_exc(), **result)
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+
+ if checksum != '':
+ destination_checksum = module.digest_from_file(dest, algorithm)
+
+ if checksum != destination_checksum:
+ os.remove(dest)
+ module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result)
+
+ # allow file attribute changes
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
+
+ # Backwards compat only. We'll return None on FIPS enabled systems
+ try:
+ result['md5sum'] = module.md5(dest)
+ except ValueError:
+ result['md5sum'] = None
+
+ if backup_file:
+ result['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/getent.py b/lib/ansible/modules/getent.py
new file mode 100644
index 0000000..1f76380
--- /dev/null
+++ b/lib/ansible/modules/getent.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: getent
+short_description: A wrapper to the unix getent utility
+description:
+ - Runs getent against one of its various databases and returns information into
+ the host's facts, in a getent_<database> prefixed variable.
+version_added: "1.8"
+options:
+ database:
+ description:
+ - The name of a getent database supported by the target system (passwd, group,
+ hosts, etc).
+ type: str
+ required: True
+ key:
+ description:
+ - Key to look up in the specified database. If not supplied, the full
+ contents of the database are returned.
+ type: str
+ default: ''
+ service:
+ description:
+ - Override all databases with the specified service.
+ - The underlying system must support the service flag, which is not always available.
+ type: str
+ version_added: "2.9"
+ split:
+ description:
+ - Character used to split the database values into lists/arrays such as C(:) or C(\t),
+ otherwise it will try to pick one depending on the database.
+ type: str
+ fail_key:
+ description:
+ - If a supplied key is missing this will make the task fail if C(true).
+ type: bool
+ default: 'yes'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix
+notes:
+ - Not all databases support enumeration, check system documentation for details.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Get root user info
+ ansible.builtin.getent:
+ database: passwd
+ key: root
+- ansible.builtin.debug:
+ var: ansible_facts.getent_passwd
+
+- name: Get all groups
+ ansible.builtin.getent:
+ database: group
+ split: ':'
+- ansible.builtin.debug:
+ var: ansible_facts.getent_group
+
+- name: Get all hosts, split by tab
+ ansible.builtin.getent:
+ database: hosts
+- ansible.builtin.debug:
+ var: ansible_facts.getent_hosts
+
+- name: Get http service info, no error if missing
+ ansible.builtin.getent:
+ database: services
+ key: http
+ fail_key: False
+- ansible.builtin.debug:
+ var: ansible_facts.getent_services
+
+- name: Get user password hash (requires sudo/root)
+ ansible.builtin.getent:
+ database: shadow
+ key: www-data
+ split: ':'
+- ansible.builtin.debug:
+ var: ansible_facts.getent_shadow
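+
+# A minimal sketch (hypothetical NSS setup) of the service override described
+# above; requires a getent binary that supports the -s flag.
+- name: Get hosts entries from the 'files' service only
+ ansible.builtin.getent:
+ database: hosts
+ service: files
+- ansible.builtin.debug:
+ var: ansible_facts.getent_hosts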
+
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Facts to add to ansible_facts.
+ returned: always
+ type: dict
+ contains:
+ getent_<database>:
+ description:
+ - A list of results or a single result as a list of the fields the database provides.
+ - The list elements depend on the database queried; see the getent man page for the structure.
+ - Starting with 2.11, all duplicate entries are returned; previously only the last one was returned.
+ returned: always
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ database=dict(type='str', required=True),
+ key=dict(type='str', no_log=False),
+ service=dict(type='str'),
+ split=dict(type='str'),
+ fail_key=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ colon = ['passwd', 'shadow', 'group', 'gshadow']
+
+ database = module.params['database']
+ key = module.params.get('key')
+ split = module.params.get('split')
+ service = module.params.get('service')
+ fail_key = module.params.get('fail_key')
+
+ getent_bin = module.get_bin_path('getent', True)
+
+ if key is not None:
+ cmd = [getent_bin, database, key]
+ else:
+ cmd = [getent_bin, database]
+
+ if service is not None:
+ cmd.extend(['-s', service])
+
+ if split is None and database in colon:
+ split = ':'
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ msg = "Unexpected failure!"
+ dbtree = 'getent_%s' % database
+ results = {dbtree: {}}
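+ # Hypothetical fact shape for "getent passwd root" with the default ':' split:
+ # {'getent_passwd': {'root': ['x', '0', '0', 'root', '/root', '/bin/bash']}}
+ # When a key repeats (e.g. in 'services'), its value becomes a list of lists.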
+
+ if rc == 0:
+ seen = {}
+ for line in out.splitlines():
+ record = line.split(split)
+
+ if record[0] in seen:
+ # more than one result for same key, ensure we store in a list
+ if seen[record[0]] == 1:
+ results[dbtree][record[0]] = [results[dbtree][record[0]]]
+
+ results[dbtree][record[0]].append(record[1:])
+ seen[record[0]] += 1
+ else:
+ # new key/value, just assign
+ results[dbtree][record[0]] = record[1:]
+ seen[record[0]] = 1
+
+ module.exit_json(ansible_facts=results)
+
+ elif rc == 1:
+ msg = "Missing arguments, or database unknown."
+ elif rc == 2:
+ msg = "One or more supplied key could not be found in the database."
+ if not fail_key:
+ results[dbtree][key] = None
+ module.exit_json(ansible_facts=results, msg=msg)
+ elif rc == 3:
+ msg = "Enumeration not supported on this database."
+
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
new file mode 100644
index 0000000..37477b3
--- /dev/null
+++ b/lib/ansible/modules/git.py
@@ -0,0 +1,1418 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+version_added: "0.0.1"
+short_description: Deploy software (or files) from git checkouts
+description:
+ - Manage I(git) checkouts of repositories to deploy files or software.
+extends_documentation_fragment: action_common_attributes
+options:
+ repo:
+ description:
+ - git, SSH, or HTTP(S) protocol address of the git repository.
+ type: str
+ required: true
+ aliases: [ name ]
+ dest:
+ description:
+ - The path of where the repository should be checked out. This
+ is equivalent to C(git clone [repo_url] [directory]). The repository
+ named in I(repo) is not appended to this path and the destination directory must be empty. This
+ parameter is required, unless I(clone) is set to C(false).
+ type: path
+ required: true
+ version:
+ description:
+ - What version of the repository to check out. This can be
+ the literal string C(HEAD), a branch name, or a tag name.
+ It can also be a I(SHA-1) hash, in which case I(refspec) needs
+ to be specified if the given revision is not already available.
+ type: str
+ default: "HEAD"
+ accept_hostkey:
+ description:
+ - If C(true), ensure that "-o StrictHostKeyChecking=no" is present as an ssh option.
+ - Be aware that this disables a protection against MITM attacks.
+ - Those using OpenSSH >= 7.5 might want to set I(ssh_opt) to 'StrictHostKeyChecking=accept-new'
+ instead, it does not remove the MITM issue but it does restrict it to the first attempt.
+ type: bool
+ default: 'no'
+ version_added: "1.5"
+ accept_newhostkey:
+ description:
+ - As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be
+ used, which is safer and will only accept host keys that are
+ not yet present or are unchanged. If C(true), ensure that
+ "-o StrictHostKeyChecking=accept-new" is present as an ssh option.
+ type: bool
+ default: 'no'
+ version_added: "2.12"
+ ssh_opts:
+ description:
+ - Options git will pass to ssh when used as protocol, it works via C(git)'s
+ GIT_SSH/GIT_SSH_COMMAND environment variables.
+ - For older versions it appends GIT_SSH_OPTS (specific to this module) to the
+ variables above or via a wrapper script.
+ - Other parameters, such as I(key_file) and I(accept_hostkey), can add options to this list.
+ - An example value could be "-o StrictHostKeyChecking=no" (although this particular
+ option is better set by I(accept_hostkey)).
+ - The module ensures that 'BatchMode=yes' is always present to avoid prompts.
+ type: str
+ version_added: "1.5"
+
+ key_file:
+ description:
+ - Specify an optional private key file path, on the target host, to use for the checkout.
+ - This ensures 'IdentitiesOnly=yes' is present in ssh_opts.
+ type: path
+ version_added: "1.5"
+ reference:
+ description:
+ - Reference repository (see "git clone --reference ...").
+ version_added: "1.4"
+ remote:
+ description:
+ - Name of the remote.
+ type: str
+ default: "origin"
+ refspec:
+ description:
+ - Add an additional refspec to be fetched.
+ If version is set to a I(SHA-1) not reachable from any branch
+ or tag, this option may be necessary to specify the ref containing
+ the I(SHA-1).
+ Uses the same syntax as the C(git fetch) command.
+ An example value could be "refs/meta/config".
+ type: str
+ version_added: "1.9"
+ force:
+ description:
+ - If C(true), any modified files in the working
+ repository will be discarded. Prior to 0.7, this was always
+ C(true) and could not be disabled. Prior to 1.9, the default was
+ C(true).
+ type: bool
+ default: 'no'
+ version_added: "0.7"
+ depth:
+ description:
+ - Create a shallow clone with a history truncated to the specified
+ number of revisions. The minimum possible value is C(1), otherwise
+ ignored. Needs I(git>=1.9.1) to work correctly.
+ type: int
+ version_added: "1.2"
+ clone:
+ description:
+ - If C(false), do not clone the repository even if it does not exist locally.
+ type: bool
+ default: 'yes'
+ version_added: "1.9"
+ update:
+ description:
+ - If C(false), do not retrieve new revisions from the origin repository.
+ - Operations like archive will work on the existing (old) repository and might
+ not respond to changes to the options version or remote.
+ type: bool
+ default: 'yes'
+ version_added: "1.2"
+ executable:
+ description:
+ - Path to git executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ type: path
+ version_added: "1.4"
+ bare:
+ description:
+ - If C(true), repository will be created as a bare repo, otherwise
+ it will be a standard repo with a workspace.
+ type: bool
+ default: 'no'
+ version_added: "1.4"
+ umask:
+ description:
+ - The umask to set before doing any checkouts, or any other
+ repository maintenance.
+ type: raw
+ version_added: "2.2"
+
+ recursive:
+ description:
+ - If C(false), repository will be cloned without the --recursive
+ option, skipping sub-modules.
+ type: bool
+ default: 'yes'
+ version_added: "1.6"
+
+ single_branch:
+ description:
+ - Clone only the history leading to the tip of the specified revision.
+ type: bool
+ default: 'no'
+ version_added: '2.11'
+
+ track_submodules:
+ description:
+ - If C(true), submodules will track the latest commit on their
+ master branch (or other branch specified in .gitmodules). If
+ C(false), submodules will be kept at the revision specified by the
+ main project. This is equivalent to specifying the --remote flag
+ to git submodule update.
+ type: bool
+ default: 'no'
+ version_added: "1.8"
+
+ verify_commit:
+ description:
+ - If C(true), when cloning or checking out a I(version) verify the
+ signature of a GPG signed commit. This requires git version>=2.1.0
+ to be installed. The commit MUST be signed and the public key MUST
+ be present in the GPG keyring.
+ type: bool
+ default: 'no'
+ version_added: "2.0"
+
+ archive:
+ description:
+ - Specify archive file path with extension. If specified, creates an
+ archive file of the specified format containing the tree structure
+ for the source tree.
+ Allowed archive formats ["zip", "tar.gz", "tar", "tgz"].
+ - This will clone and perform git archive from the local directory, as not
+ all git servers support git archive.
+ type: path
+ version_added: "2.4"
+
+ archive_prefix:
+ description:
+ - Specify a prefix to add to each file path in archive. Requires I(archive) to be specified.
+ version_added: "2.10"
+ type: str
+
+ separate_git_dir:
+ description:
+ - The path to place the cloned repository. If specified, the Git repository
+ can be separated from the working tree.
+ type: path
+ version_added: "2.7"
+
+ gpg_whitelist:
+ description:
+ - A list of trusted GPG fingerprints to compare to the fingerprint of the
+ GPG-signed commit.
+ - Only used when I(verify_commit=yes).
+ - Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
+ type: list
+ elements: str
+ default: []
+ version_added: "2.9"
+
+requirements:
+ - git>=1.7.1 (the command line tool)
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+notes:
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to use the option accept_hostkey. Another solution is to
+ add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
+'''
+
+EXAMPLES = '''
+- name: Git checkout
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ version: release-0.22
+
+- name: Read-write git checkout from github
+ ansible.builtin.git:
+ repo: git@github.com:mylogin/hello.git
+ dest: /home/mylogin/hello
+
+- name: Just ensuring the repo checkout exists
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ update: no
+
+- name: Just get information about the repository, whether or not it has already been cloned locally
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ clone: no
+ update: no
+
+- name: Checkout a github repo and use refspec to fetch all pull requests
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ refspec: '+refs/pull/*:refs/heads/*'
+
+- name: Create git archive from repo
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ archive: /tmp/ansible-examples.zip
+
+- name: Clone a repo with separate git directory
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ separate_git_dir: /src/ansible-examples.git
+
+- name: Example clone of a single branch
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ single_branch: yes
+ version: master
+
+- name: Avoid hanging when http(s) password is missing
+ ansible.builtin.git:
+ repo: https://github.com/ansible/could-be-a-private-repo
+ dest: /src/from-private-repo
+ environment:
+ GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
+ # or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
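+
+# A hedged sketch of commit signature verification (the fingerprint below is
+# hypothetical); requires git >= 2.6 for gpg_whitelist and the signer's
+# public key in the GPG keyring.
+- name: Checkout only if the commit is signed by a trusted key
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ version: release-0.22
+ verify_commit: yes
+ gpg_whitelist:
+ - "0123456789ABCDEF0123456789ABCDEF01234567"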
+'''
+
+RETURN = '''
+after:
+ description: Last commit revision of the repository retrieved during the update.
+ returned: success
+ type: str
+ sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
+before:
+ description: Commit revision before the repository was updated, "null" for new repository.
+ returned: success
+ type: str
+ sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
+remote_url_changed:
+ description: Indicates whether the remote URL was changed.
+ returned: success
+ type: bool
+ sample: True
+warnings:
+ description: List of warnings if requested features were not available due to a too old git version.
+ returned: error
+ type: str
+ sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
+git_dir_now:
+ description: Contains the new path of .git directory if it is changed.
+ returned: success
+ type: str
+ sample: /path/to/new/git/dir
+git_dir_before:
+ description: Contains the original path of .git directory if it is changed.
+ returned: success
+ type: str
+ sample: /path/to/old/git/dir
+'''
+
+import filecmp
+import os
+import re
+import shlex
+import stat
+import sys
+import shutil
+import tempfile
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.six import b, string_types
+
+
+def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
+ if os.path.exists(repo_dir):
+ module.fail_json(msg='Separate-git-dir path %s already exists.' % repo_dir)
+ if worktree_dir:
+ dot_git_file_path = os.path.join(worktree_dir, '.git')
+ try:
+ shutil.move(old_repo_dir, repo_dir)
+ with open(dot_git_file_path, 'w') as dot_git_file:
+ dot_git_file.write('gitdir: %s' % repo_dir)
+ result['git_dir_before'] = old_repo_dir
+ result['git_dir_now'] = repo_dir
+ except (IOError, OSError) as err:
+ # if we already moved the .git dir, roll it back
+ if os.path.exists(repo_dir):
+ shutil.move(repo_dir, old_repo_dir)
+ module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))
+
+
+def head_splitter(headfile, remote, module=None, fail_on_error=False):
+ '''Extract the head reference'''
+ # https://github.com/ansible/ansible-modules-core/pull/907
+
+ res = None
+ if os.path.exists(headfile):
+ rawdata = None
+ try:
+ f = open(headfile, 'r')
+ rawdata = f.readline()
+ f.close()
+ except Exception:
+ if fail_on_error and module:
+ module.fail_json(msg="Unable to read %s" % headfile)
+ if rawdata:
+ try:
+ rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
+ refparts = rawdata.split(' ')
+ newref = refparts[-1]
+ nrefparts = newref.split('/', 2)
+ res = nrefparts[-1].rstrip('\n')
+ except Exception:
+ if fail_on_error and module:
+ module.fail_json(msg="Unable to split head from '%s'" % rawdata)
+ return res
+
+
+def unfrackgitpath(path):
+ if path is None:
+ return None
+
+ # copied from ansible.utils.path
+ return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
+
+
+def get_submodule_update_params(module, git_path, cwd):
+ # or: git submodule [--quiet] update [--init] [-N|--no-fetch]
+ # [-f|--force] [--rebase] [--reference <repository>] [--merge]
+ # [--recursive] [--] [<path>...]
+
+ params = []
+
+ # run a bad submodule command to get valid params
+ cmd = "%s submodule update --help" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
+ lines = stderr.split('\n')
+ update_line = None
+ for line in lines:
+ if 'git submodule [--quiet] update ' in line:
+ update_line = line
+ if update_line:
+ update_line = update_line.replace('[', '')
+ update_line = update_line.replace(']', '')
+ update_line = update_line.replace('|', ' ')
+ parts = shlex.split(update_line)
+ for part in parts:
+ if part.startswith('--'):
+ part = part.replace('--', '')
+ params.append(part)
+
+ return params
+
+
+def write_ssh_wrapper(module):
+ '''
+ This writes an shell wrapper for ssh options to be used with git
+ this is only relevant for older versions of gitthat cannot
+ handle the options themselves. Returns path to the script
+ '''
+ try:
+ # make sure we have full permission to the module_dir, which
+ # may not be the case if we're sudo'ing to a non-root user
+ if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):
+ fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')
+ else:
+ raise OSError
+ except (IOError, OSError):
+ fd, wrapper_path = tempfile.mkstemp()
+
+ # use existing git_ssh/ssh_command, fallback to 'ssh'
+ template = b("""#!/bin/sh
+%s $GIT_SSH_OPTS "$@"
+""" % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh')))
+
+ # write it
+ with os.fdopen(fd, 'w+b') as fh:
+ fh.write(template)
+
+ # set execute
+ st = os.stat(wrapper_path)
+ os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
+
+ module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template))
+
+ # ensure we cleanup after ourselves
+ module.add_cleanup_file(path=wrapper_path)
+
+ return wrapper_path
+
+
+def set_git_ssh_env(key_file, ssh_opts, git_version, module):
+ '''
+ Use environment variables to configure git's ssh execution,
+ which varies by version; this function should handle all of them.
+ '''
+
+ # initialise to existing ssh opts and/or append user provided
+ if ssh_opts is None:
+ ssh_opts = os.environ.get('GIT_SSH_OPTS', '')
+ else:
+ ssh_opts = os.environ.get('GIT_SSH_OPTS', '') + ' ' + ssh_opts
+
+ # hostkey acceptance
+ accept_key = "StrictHostKeyChecking=no"
+ if module.params['accept_hostkey'] and accept_key not in ssh_opts:
+ ssh_opts += " -o %s" % accept_key
+
+ # avoid prompts
+ force_batch = 'BatchMode=yes'
+ if force_batch not in ssh_opts:
+ ssh_opts += ' -o %s' % (force_batch)
+
+ # deal with key file
+ if key_file:
+ key_opt = '-i %s' % key_file
+ if key_opt not in ssh_opts:
+ ssh_opts += ' %s' % key_opt
+
+ ikey = 'IdentitiesOnly=yes'
+ if ikey not in ssh_opts:
+ ssh_opts += ' -o %s' % ikey
+
+ # git older than 2.3 does not know how to use GIT_SSH_COMMAND,
+ # so we force the options into the GIT_SSH wrapper instead
+ # https://github.com/gitster/git/commit/09d60d785c68c8fa65094ecbe46fbc2a38d0fc1f
+ if git_version < LooseVersion('2.3.0'):
+ # for use in wrapper
+ os.environ["GIT_SSH_OPTS"] = ssh_opts
+
+ # these versions don't support GIT_SSH_OPTS so have to write wrapper
+ wrapper = write_ssh_wrapper(module)
+
+ # force use of GIT_SSH_OPTS via the wrapper; GIT_SSH cannot handle arguments
+ os.environ['GIT_SSH'] = wrapper
+ else:
+ # we construct full finalized command string here
+ full_cmd = os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh'))
+ if ssh_opts:
+ full_cmd += ' ' + ssh_opts
+ # git_ssh_command can handle arguments to ssh
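+ # e.g. a hypothetical final value with accept_hostkey and key_file set:
+ # ssh -o StrictHostKeyChecking=no -o BatchMode=yes -i /path/key -o IdentitiesOnly=yes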
+ os.environ["GIT_SSH_COMMAND"] = full_cmd
+
+
+def get_version(module, git_path, dest, ref="HEAD"):
+ ''' samples the version of the git repo '''
+
+ cmd = "%s rev-parse %s" % (git_path, ref)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ sha = to_native(stdout).rstrip('\n')
+ return sha
+
+
+def ssh_supports_acceptnewhostkey(module):
+ try:
+ ssh_path = get_bin_path('ssh')
+ except ValueError as err:
+ module.fail_json(
+ msg='Remote host is missing ssh command, so you cannot '
+ 'use acceptnewhostkey option.', details=to_text(err))
+ supports_acceptnewhostkey = True
+ cmd = [ssh_path, '-o', 'StrictHostKeyChecking=accept-new', '-V']
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ supports_acceptnewhostkey = False
+ return supports_acceptnewhostkey
+
+
+def get_submodule_versions(git_path, module, dest, version='HEAD'):
+ cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(
+ msg='Unable to determine hashes of submodules',
+ stdout=out,
+ stderr=err,
+ rc=rc)
+ submodules = {}
+ subm_name = None
+ for line in out.splitlines():
+ if line.startswith("Entering '"):
+ subm_name = line[10:-1]
+ elif len(line.strip()) == 40:
+ if subm_name is None:
+ module.fail_json(msg='Unable to determine submodule name for hash: %s' % line.strip())
+ submodules[subm_name] = line.strip()
+ subm_name = None
+ else:
+ module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
+ if subm_name is not None:
+ module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
+
+ return submodules
+
+
+def clone(git_path, module, repo, dest, remote, depth, version, bare,
+ reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch):
+ ''' makes a new git repo if it does not already exist '''
+ dest_dirname = os.path.dirname(dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ cmd = [git_path, 'clone']
+
+ if bare:
+ cmd.append('--bare')
+ else:
+ cmd.extend(['--origin', remote])
+
+ is_branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)
+ if depth:
+ if version == 'HEAD' or refspec:
+ cmd.extend(['--depth', str(depth)])
+ elif is_branch_or_tag:
+ cmd.extend(['--depth', str(depth)])
+ cmd.extend(['--branch', version])
+ else:
+ # only use depth if the remote object is branch or tag (i.e. fetchable)
+ module.warn("Ignoring depth argument. "
+ "Shallow clones are only available for "
+ "HEAD, branches, tags or in combination with refspec.")
+ if reference:
+ cmd.extend(['--reference', str(reference)])
+
+ if single_branch:
+ if git_version_used is None:
+ module.fail_json(msg='Cannot find git executable at %s' % git_path)
+
+ if git_version_used < LooseVersion('1.7.10'):
+ module.warn("git version '%s' is too old to use 'single-branch'. Ignoring." % git_version_used)
+ else:
+ cmd.append("--single-branch")
+
+ if is_branch_or_tag:
+ cmd.extend(['--branch', version])
+
+ needs_separate_git_dir_fallback = False
+ if separate_git_dir:
+ if git_version_used is None:
+ module.fail_json(msg='Cannot find git executable at %s' % git_path)
+ if git_version_used < LooseVersion('1.7.5'):
+ # git before 1.7.5 doesn't have separate-git-dir argument, do fallback
+ needs_separate_git_dir_fallback = True
+ else:
+ cmd.append('--separate-git-dir=%s' % separate_git_dir)
+
+ cmd.extend([repo, dest])
+ module.run_command(cmd, check_rc=True, cwd=dest_dirname)
+ if needs_separate_git_dir_fallback:
+ relocate_repo(module, result, separate_git_dir, os.path.join(dest, ".git"), dest)
+
+ if bare and remote != 'origin':
+ module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
+
+ if refspec:
+ cmd = [git_path, 'fetch']
+ if depth:
+ cmd.extend(['--depth', str(depth)])
+ cmd.extend([remote, refspec])
+ module.run_command(cmd, check_rc=True, cwd=dest)
+
+ if verify_commit:
+ verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+
+
+def has_local_mods(module, git_path, dest, bare):
+ if bare:
+ return False
+
+ cmd = "%s status --porcelain" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ lines = stdout.splitlines()
+ lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
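+ # '??' entries from 'git status --porcelain' are untracked files, not local
+ # modifications, so the filter above drops them.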
+
+ return len(lines) > 0
+
+
+def reset(git_path, module, dest):
+ '''
+ Resets the index and working tree to HEAD.
+ Discards any changes to tracked files in working
+ tree since that commit.
+ '''
+ cmd = "%s reset --hard HEAD" % (git_path,)
+ return module.run_command(cmd, check_rc=True, cwd=dest)
+
+
+def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
+ ''' Return the difference between 2 versions '''
+ if before is None:
+ return {'prepared': '>> Newly checked out %s' % after}
+ elif before != after:
+ # Ensure we have the object we are referring to during git diff !
+ git_version_used = git_version(git_path, module)
+ fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
+ cmd = '%s diff %s %s' % (git_path, before, after)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc == 0 and out:
+ return {'prepared': out}
+ elif rc == 0:
+ return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
+ elif err:
+ return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
+ else:
+ return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
+ return {}
+
+
+def get_remote_head(git_path, module, dest, version, remote, bare):
+ cloning = False
+ cwd = None
+ tag = False
+ if remote == module.params['repo']:
+ cloning = True
+ elif remote == 'file://' + os.path.expanduser(module.params['repo']):
+ cloning = True
+ else:
+ cwd = dest
+ if version == 'HEAD':
+ if cloning:
+ # cloning the repo, just get the remote's HEAD version
+ cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
+ else:
+ head_branch = get_head_branch(git_path, module, dest, remote, bare)
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
+ elif is_remote_branch(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
+ elif is_remote_tag(git_path, module, dest, remote, version):
+ tag = True
+ cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
+ else:
+ # appears to be a sha1. return as-is since we
+ # cannot check for a specific sha1 on the remote
+ return version
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
+ if len(out) < 1:
+ module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
+
+ out = to_native(out)
+
+ if tag:
+ # Find the dereferenced tag if this is an annotated tag.
+ for tag in out.split('\n'):
+ if tag.endswith(version + '^{}'):
+ out = tag
+ break
+ elif tag.endswith(version):
+ out = tag
+
+ rev = out.split()[0]
+ return rev
+
+
+def is_remote_tag(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if to_native(version, errors='surrogate_or_strict') in out:
+ return True
+ else:
+ return False
+
+
+def get_branches(git_path, module, dest):
+ branches = []
+ cmd = '%s branch --no-color -a' % (git_path,)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
+ for line in out.split('\n'):
+ if line.strip():
+ branches.append(line.strip())
+ return branches
+
+
+def get_annotated_tags(git_path, module, dest):
+ tags = []
+ cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
+ for line in to_native(out).split('\n'):
+ if line.strip():
+ tagtype, tagname = line.strip().split(':')
+ if tagtype == 'tag':
+ tags.append(tagname)
+ return tags
+
+
+def is_remote_branch(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if to_native(version, errors='surrogate_or_strict') in out:
+ return True
+ else:
+ return False
+
+
+def is_local_branch(git_path, module, dest, branch):
+ branches = get_branches(git_path, module, dest)
+ lbranch = '%s' % branch
+ if lbranch in branches:
+ return True
+ elif '* %s' % branch in branches:
+ return True
+ else:
+ return False
+
+
+def is_not_a_branch(git_path, module, dest):
+ branches = get_branches(git_path, module, dest)
+ for branch in branches:
+ if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch or 'detached at' in branch):
+ return True
+ return False
+
+
+def get_repo_path(dest, bare):
+ if bare:
+ repo_path = dest
+ else:
+ repo_path = os.path.join(dest, '.git')
+ # Check if .git is a file. If so, the repository is in an external directory relative to the working copy (e.g. we are in a
+ # submodule structure).
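+ # e.g. a hypothetical submodule's .git file contains a single line:
+ # gitdir: ../.git/modules/mysub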
+ if os.path.isfile(repo_path):
+ with open(repo_path, 'r') as gitfile:
+ data = gitfile.read()
+ ref_prefix, gitdir = data.rstrip().split('gitdir: ', 1)
+ if ref_prefix:
+ raise ValueError('.git file has invalid git dir reference format')
+
+ # The .git file may contain an absolute path.
+ if os.path.isabs(gitdir):
+ repo_path = gitdir
+ else:
+ # Use original destination directory with data from .git file.
+ repo_path = os.path.join(dest, gitdir)
+ if not os.path.isdir(repo_path):
+ raise ValueError('%s is not a directory' % repo_path)
+ return repo_path
+
+
+def get_head_branch(git_path, module, dest, remote, bare=False):
+ '''
+ Determine what branch HEAD is associated with. This is partly
+ taken from lib/ansible/utils/__init__.py. It finds the correct
+ path to .git/HEAD and reads from that file the branch that HEAD is
+ associated with. In the case of a detached HEAD, this will look
+ up the branch in .git/refs/remotes/<remote>/HEAD.
+ '''
+ try:
+ repo_path = get_repo_path(dest, bare)
+ except (IOError, ValueError) as err:
+ # No repo path found: the ``.git`` file does not have a valid
+ # format for a detached Git dir.
+ module.fail_json(
+ msg='Current repo does not have a valid reference to a '
+ 'separate Git dir or it refers to an invalid path',
+ details=to_text(err),
+ )
+ # Read .git/HEAD for the name of the branch.
+ # If we're in a detached HEAD state, look up the branch associated with
+ # the remote HEAD in .git/refs/remotes/<remote>/HEAD
+ headfile = os.path.join(repo_path, "HEAD")
+ if is_not_a_branch(git_path, module, dest):
+ headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
+ branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
+ return branch
+
+
+def get_remote_url(git_path, module, dest, remote):
+ '''Return URL of remote source for repo.'''
+ command = [git_path, 'ls-remote', '--get-url', remote]
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ # There was an issue getting remote URL, most likely
+ # command is not available in this version of Git.
+ return None
+ return to_native(out).rstrip('\n')
+
+
+def set_remote_url(git_path, module, repo, dest, remote):
+ ''' updates repo from remote sources '''
+ # Return if remote URL isn't changing.
+ remote_url = get_remote_url(git_path, module, dest, remote)
+ if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
+ return False
+
+ command = [git_path, 'remote', 'set-url', remote, repo]
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ label = "set a new url %s for %s" % (repo, remote)
+ module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
+
+ # Return False if remote_url is None to maintain previous behavior
+ # for Git versions prior to 1.7.5 that lack required functionality.
+ return remote_url is not None
+
+
+def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False):
+ ''' updates repo from remote sources '''
+ set_remote_url(git_path, module, repo, dest, remote)
+ commands = []
+
+ fetch_str = 'download remote objects and refs'
+ fetch_cmd = [git_path, 'fetch']
+
+ refspecs = []
+ if depth:
+ # try to find the minimal set of refs we need to fetch to get a
+ # successful checkout
+ currenthead = get_head_branch(git_path, module, dest, remote)
+ if refspec:
+ refspecs.append(refspec)
+ elif version == 'HEAD':
+ refspecs.append(currenthead)
+ elif is_remote_branch(git_path, module, dest, repo, version):
+ if currenthead != version:
+ # this workaround is only needed for older git versions
+ # 1.8.3 is broken, 1.9.x works
+ # ensure that remote branch is available as both local and remote ref
+ refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
+ refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
+ elif is_remote_tag(git_path, module, dest, repo, version):
+ refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
+ if refspecs:
+ # if refspecs is empty, i.e. version is neither heads nor tags
+ # assume it is a version hash
+ # fall back to a full clone, otherwise we might not be able to checkout
+ # version
+ fetch_cmd.extend(['--depth', str(depth)])
+
+ if not depth or not refspecs:
+ # don't try to be minimalistic but do a full clone
+ # also do this if depth is given, but version is something that can't be fetched directly
+ if bare:
+ refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
+ else:
+ # ensure all tags are fetched
+ if git_version_used >= LooseVersion('1.9'):
+ fetch_cmd.append('--tags')
+ else:
+ # old git versions have a bug in --tags that prevents updating existing tags
+ commands.append((fetch_str, fetch_cmd + [remote]))
+ refspecs = ['+refs/tags/*:refs/tags/*']
+ if refspec:
+ refspecs.append(refspec)
+
+ if force:
+ fetch_cmd.append('--force')
+
+ fetch_cmd.extend([remote])
+
+ commands.append((fetch_str, fetch_cmd + refspecs))
+
+ for (label, command) in commands:
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
+
+
+def submodules_fetch(git_path, module, remote, track_submodules, dest):
+ changed = False
+
+ if not os.path.exists(os.path.join(dest, '.gitmodules')):
+ # no submodules
+ return changed
+
+ gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
+ for line in gitmodules_file:
+ # Check for new submodules
+ if not changed and line.strip().startswith('path'):
+ path = line.split('=', 1)[1].strip()
+ # Check that dest/path/.git exists
+ if not os.path.exists(os.path.join(dest, path, '.git')):
+ changed = True
+
+ # Check for updates to existing modules
+ if not changed:
+ # Fetch updates
+ begin = get_submodule_versions(git_path, module, dest)
+ cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
+
+ if track_submodules:
+ # Compare against submodule HEAD
+ # FIXME: determine this from .gitmodules
+ version = 'master'
+ after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
+ if begin != after:
+ changed = True
+ else:
+ # Compare against the superproject's expectation
+ cmd = [git_path, 'submodule', 'status']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
+ for line in out.splitlines():
+ if line[0] != ' ':
+ changed = True
+ break
+ return changed
+
+
+def submodule_update(git_path, module, dest, track_submodules, force=False):
+ ''' init and update any submodules '''
+
+ # get the valid submodule params
+ params = get_submodule_update_params(module, git_path, dest)
+
+ # skip submodule commands if .gitmodules is not present
+ if not os.path.exists(os.path.join(dest, '.gitmodules')):
+ return (0, '', '')
+ cmd = [git_path, 'submodule', 'sync']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if 'remote' in params and track_submodules:
+ cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
+ else:
+ cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
+ if force:
+ cmd.append('--force')
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
+ return (rc, out, err)
+
+
+def set_remote_branch(git_path, module, dest, remote, version, depth):
+ """set refs for the remote branch version
+
+ This assumes the branch does not yet exist locally and is therefore also not checked out.
+ Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
+ """
+
+ branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
+ branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
+ cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
+
+
+def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist):
+ cmd = ''
+ if version == 'HEAD':
+ branch = get_head_branch(git_path, module, dest, remote)
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % branch,
+ stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
+ else:
+ # FIXME check for local_branch first, should have been fetched already
+ if is_remote_branch(git_path, module, dest, remote, version):
+ if depth and not is_local_branch(git_path, module, dest, version):
+ # git clone --depth implies --single-branch, which makes
+ # the checkout fail if the version changes
+ # fetch the remote branch, to be able to check it out next
+ set_remote_branch(git_path, module, dest, remote, version, depth)
+ if not is_local_branch(git_path, module, dest, version):
+ cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
+ else:
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
+ else:
+ cmd = "%s checkout --force %s" % (git_path, version)
+ (rc, out1, err1) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ if version != 'HEAD':
+ module.fail_json(msg="Failed to checkout %s" % (version),
+ stdout=out1, stderr=err1, rc=rc, cmd=cmd)
+ else:
+ module.fail_json(msg="Failed to checkout branch %s" % (branch),
+ stdout=out1, stderr=err1, rc=rc, cmd=cmd)
+
+ if verify_commit:
+ verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+
+ return (rc, out1, err1)
+
+
+def verify_commit_sign(git_path, module, dest, version, gpg_whitelist):
+ if version in get_annotated_tags(git_path, module, dest):
+ git_sub = "verify-tag"
+ else:
+ git_sub = "verify-commit"
+ cmd = "%s %s %s" % (git_path, git_sub, version)
+ if gpg_whitelist:
+ cmd += " --raw"
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
+ if gpg_whitelist:
+ fingerprint = get_gpg_fingerprint(err)
+ if fingerprint not in gpg_whitelist:
+ module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
+ return (rc, out, err)
+
+
+def get_gpg_fingerprint(output):
+ """Return a fingerprint of the primary key.
+
+ Ref:
+ https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;hb=HEAD#l482
+ """
+ for line in output.splitlines():
+ data = line.split()
+ if len(data) < 2 or data[1] != 'VALIDSIG':
+ continue
+
+ # if signed with a subkey, this contains the primary key fingerprint
+ data_id = 11 if len(data) == 11 else 2
+ return data[data_id]
+
+
+def git_version(git_path, module):
+ """return the installed version of git"""
+ cmd = "%s --version" % git_path
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ # one could fail_json here, but the version info is not that important,
+ # so let's try to fail only on actual git commands
+ return None
+ rematch = re.search('git version (.*)$', to_native(out))
+ if not rematch:
+ return None
+ return LooseVersion(rematch.groups()[0])
+
+
+def git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version):
+ """ Create git archive in given source directory """
+ cmd = [git_path, 'archive', '--format', archive_fmt, '--output', archive, version]
+ if archive_prefix is not None:
+ cmd.insert(-1, '--prefix')
+ cmd.insert(-1, archive_prefix)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to perform archive operation",
+ details="Git archive command failed to create "
+ "archive %s using %s directory."
+ "Error: %s" % (archive, dest, err))
+ return rc, out, err
+
+
+def create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result):
+ """ Helper function for creating archive using git_archive """
+ all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
+ '.tgz': 'tgz'}
+ _, archive_ext = os.path.splitext(archive)
+ archive_fmt = all_archive_fmt.get(archive_ext, None)
+ if archive_fmt is None:
+ module.fail_json(msg="Unable to get file extension from "
+ "archive file name : %s" % archive,
+ details="Please specify archive as filename with "
+ "extension. File extension can be one "
+ "of ['tar', 'tar.gz', 'zip', 'tgz']")
+
+ repo_name = repo.split("/")[-1].replace(".git", "")
+
+ if os.path.exists(archive):
+ # If git archive file exists, then compare it with new git archive file.
+ # if match, do nothing
+ # if does not match, then replace existing with temp archive file.
+ tempdir = tempfile.mkdtemp()
+ new_archive_dest = os.path.join(tempdir, repo_name)
+ new_archive = new_archive_dest + '.' + archive_fmt
+ git_archive(git_path, module, dest, new_archive, archive_fmt, archive_prefix, version)
+
+ # filecmp is more efficient than comparing md5 checksums
+ if filecmp.cmp(new_archive, archive):
+ result.update(changed=False)
+ # Cleanup before exiting
+ try:
+ shutil.rmtree(tempdir)
+ except OSError:
+ pass
+ else:
+ try:
+ shutil.move(new_archive, archive)
+ shutil.rmtree(tempdir)
+ result.update(changed=True)
+ except OSError as e:
+ module.fail_json(msg="Failed to move %s to %s" %
+ (new_archive, archive),
+ details=u"Error occurred while moving : %s"
+ % to_text(e))
+ else:
+ # Perform archive from local directory
+ git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version)
+ result.update(changed=True)
+
+# ===========================================
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path'),
+ repo=dict(required=True, aliases=['name']),
+ version=dict(default='HEAD'),
+ remote=dict(default='origin'),
+ refspec=dict(default=None),
+ reference=dict(default=None),
+ force=dict(default='no', type='bool'),
+ depth=dict(default=None, type='int'),
+ clone=dict(default='yes', type='bool'),
+ update=dict(default='yes', type='bool'),
+ verify_commit=dict(default='no', type='bool'),
+ gpg_whitelist=dict(default=[], type='list', elements='str'),
+ accept_hostkey=dict(default='no', type='bool'),
+ accept_newhostkey=dict(default='no', type='bool'),
+ key_file=dict(default=None, type='path', required=False),
+ ssh_opts=dict(default=None, required=False),
+ executable=dict(default=None, type='path'),
+ bare=dict(default='no', type='bool'),
+ recursive=dict(default='yes', type='bool'),
+ single_branch=dict(default=False, type='bool'),
+ track_submodules=dict(default='no', type='bool'),
+ umask=dict(default=None, type='raw'),
+ archive=dict(type='path'),
+ archive_prefix=dict(),
+ separate_git_dir=dict(type='path'),
+ ),
+ mutually_exclusive=[('separate_git_dir', 'bare'), ('accept_hostkey', 'accept_newhostkey')],
+ required_by={'archive_prefix': ['archive']},
+ supports_check_mode=True
+ )
+
+ dest = module.params['dest']
+ repo = module.params['repo']
+ version = module.params['version']
+ remote = module.params['remote']
+ refspec = module.params['refspec']
+ force = module.params['force']
+ depth = module.params['depth']
+ update = module.params['update']
+ allow_clone = module.params['clone']
+ bare = module.params['bare']
+ verify_commit = module.params['verify_commit']
+ gpg_whitelist = module.params['gpg_whitelist']
+ reference = module.params['reference']
+ single_branch = module.params['single_branch']
+ git_path = module.params['executable'] or module.get_bin_path('git', True)
+ key_file = module.params['key_file']
+ ssh_opts = module.params['ssh_opts']
+ umask = module.params['umask']
+ archive = module.params['archive']
+ archive_prefix = module.params['archive_prefix']
+ separate_git_dir = module.params['separate_git_dir']
+
+ result = dict(changed=False, warnings=list())
+
+ if module.params['accept_hostkey']:
+ if ssh_opts is not None:
+ if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
+ ssh_opts += " -o StrictHostKeyChecking=no"
+ else:
+ ssh_opts = "-o StrictHostKeyChecking=no"
+
+ if module.params['accept_newhostkey']:
+ if not ssh_supports_acceptnewhostkey(module):
+ module.warn("Your ssh client does not support accept_newhostkey option, therefore it cannot be used.")
+ else:
+ if ssh_opts is not None:
+ if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
+ ssh_opts += " -o StrictHostKeyChecking=accept-new"
+ else:
+ ssh_opts = "-o StrictHostKeyChecking=accept-new"
+
+ # evaluate and set the umask before doing anything else
+ if umask is not None:
+ if not isinstance(umask, string_types):
+ module.fail_json(msg="umask must be defined as a quoted octal integer")
+ try:
+ umask = int(umask, 8)
+ except Exception:
+ module.fail_json(msg="umask must be an octal integer",
+ details=to_text(sys.exc_info()[1]))
+ os.umask(umask)
+
+ # Certain features such as depth require a file:/// protocol for path based urls
+ # so force a protocol here ...
+ if os.path.expanduser(repo).startswith('/'):
+ repo = 'file://' + os.path.expanduser(repo)
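+ # e.g. a repo of /srv/git/project.git becomes file:///srv/git/project.git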
+
+ # We screenscrape a huge amount of git commands so use C locale anytime we
+ # call run_command()
+ locale = get_best_parsable_locale(module)
+ module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale)
+
+ if separate_git_dir:
+ separate_git_dir = os.path.realpath(separate_git_dir)
+
+ gitconfig = None
+ if not dest and allow_clone:
+ module.fail_json(msg="the destination directory must be specified unless clone=no")
+ elif dest:
+ dest = os.path.abspath(dest)
+ try:
+ repo_path = get_repo_path(dest, bare)
+ if separate_git_dir and os.path.exists(repo_path) and separate_git_dir != repo_path:
+ result['changed'] = True
+ if not module.check_mode:
+ relocate_repo(module, result, separate_git_dir, repo_path, dest)
+ repo_path = separate_git_dir
+ except (IOError, ValueError) as err:
+ # No repo path found, or the ``.git`` file does not have a valid
+ # format for a detached Git dir.
+ module.fail_json(
+ msg='Current repo does not have a valid reference to a '
+ 'separate Git dir or it refers to an invalid path',
+ details=to_text(err),
+ )
+ gitconfig = os.path.join(repo_path, 'config')
+
+ # iface changes so need it to make decisions
+ git_version_used = git_version(git_path, module)
+
+ # GIT_SSH=<path> as an environment variable, might create sh wrapper script for older versions.
+ set_git_ssh_env(key_file, ssh_opts, git_version_used, module)
+
+ if depth is not None and git_version_used < LooseVersion('1.9.1'):
+ module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
+ depth = None
+
+ recursive = module.params['recursive']
+ track_submodules = module.params['track_submodules']
+
+ result.update(before=None)
+
+ local_mods = False
+ if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
+ # if there is no git configuration, do a clone operation unless:
+ # * the user requested no clone (they just want info)
+ # * we're doing a check mode test
+ # In those cases we do an ls-remote
+ if module.check_mode or not allow_clone:
+ remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
+ result.update(changed=True, after=remote_head)
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+ # there's no git config, so clone
+ clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
+ refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch)
+ elif not update:
+ # Just return having found a repo already in the dest path
+ # this does no checking that the repo is the actual repo
+ # requested.
+ result['before'] = get_version(module, git_path, dest)
+ result.update(after=result['before'])
+ if archive:
+ # Git archive is not supported by all git servers, so
+ # we will first clone and perform git archive from local directory
+ if module.check_mode:
+ result.update(changed=True)
+ module.exit_json(**result)
+
+ create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
+
+ module.exit_json(**result)
+ else:
+ # else do a pull
+ local_mods = has_local_mods(module, git_path, dest, bare)
+ result['before'] = get_version(module, git_path, dest)
+ if local_mods:
+ # failure should happen regardless of check mode
+ if not force:
+ module.fail_json(msg="Local modifications exist in the destination: " + dest + " (force=no).", **result)
+ # if force and in non-check mode, do a reset
+ if not module.check_mode:
+ reset(git_path, module, dest)
+ result.update(changed=True, msg='Local modifications exist in the destination: ' + dest)
+
+ # exit if already at desired sha version
+ if module.check_mode:
+ remote_url = get_remote_url(git_path, module, dest, remote)
+ remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
+ else:
+ remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
+ result.update(remote_url_changed=remote_url_changed)
+
+ if module.check_mode:
+ remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
+ result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
+ # FIXME: This diff should fail since the new remote_head is not fetched yet?!
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+ else:
+ fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=force)
+
+ result['after'] = get_version(module, git_path, dest)
+
+ # switch to version specified regardless of whether
+ # we got new revisions from the repository
+ if not bare:
+ switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist)
+
+ # Deal with submodules
+ submodules_updated = False
+ if recursive and not bare:
+ submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
+ if submodules_updated:
+ result.update(submodules_changed=submodules_updated)
+
+ if module.check_mode:
+ result.update(changed=True, after=remote_head)
+ module.exit_json(**result)
+
+ # Switch to version specified
+ submodule_update(git_path, module, dest, track_submodules, force=force)
+
+ # determine if we changed anything
+ result['after'] = get_version(module, git_path, dest)
+
+ if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
+ result.update(changed=True)
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+
+ if archive:
+ # Git archive is not supported by all git servers, so
+ # we will first clone and perform git archive from local directory
+ if module.check_mode:
+ result.update(changed=True)
+ module.exit_json(**result)
+
+ create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/group.py b/lib/ansible/modules/group.py
new file mode 100644
index 0000000..109a161
--- /dev/null
+++ b/lib/ansible/modules/group.py
@@ -0,0 +1,662 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: group
+version_added: "0.0.2"
+short_description: Add or remove groups
+requirements:
+- groupadd
+- groupdel
+- groupmod
+description:
+ - Manage presence of groups on a host.
+ - For Windows targets, use the M(ansible.windows.win_group) module instead.
+options:
+ name:
+ description:
+ - Name of the group to manage.
+ type: str
+ required: true
+ gid:
+ description:
+ - Optional I(GID) to set for the group.
+ type: int
+ state:
+ description:
+ - Whether the group should be present or not on the remote host.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ system:
+ description:
+ - If I(yes), indicates that the group created is a system group.
+ type: bool
+ default: no
+ local:
+ description:
+ - Forces the use of "local" command alternatives on platforms that implement it.
+ - This is useful in environments that use centralized authentication when you want to manipulate the local groups
+ (for example, it uses C(lgroupadd) instead of C(groupadd)).
+ - This requires that these commands exist on the targeted host; otherwise it will be a fatal error.
+ type: bool
+ default: no
+ version_added: "2.6"
+ non_unique:
+ description:
+ - This option allows changing the group ID to a non-unique value. Requires C(gid).
+ - Not supported on macOS or BusyBox distributions.
+ type: bool
+ default: no
+ version_added: "2.8"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.builtin.user
+- module: ansible.windows.win_group
+author:
+- Stephen Fromm (@sfromm)
+'''
+
+EXAMPLES = '''
+- name: Ensure group "somegroup" exists
+ ansible.builtin.group:
+ name: somegroup
+ state: present
+
+- name: Ensure group "docker" exists with correct gid
+ ansible.builtin.group:
+ name: docker
+ state: present
+ gid: 1750
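+
+# An additional illustrative example using the documented 'system' option;
+# the group name here is hypothetical.
+- name: Ensure system group "appmon" exists
+ ansible.builtin.group:
+ name: appmon
+ state: present
+ system: true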
+'''
+
+RETURN = r'''
+gid:
+ description: Group ID of the group.
+ returned: When C(state) is 'present'
+ type: int
+ sample: 1001
+name:
+ description: Group name.
+ returned: always
+ type: str
+ sample: users
+state:
+ description: Whether the group is present or not.
+ returned: always
+ type: str
+ sample: 'absent'
+system:
+ description: Whether the group is a system group or not.
+ returned: When C(state) is 'present'
+ type: bool
+ sample: False
+'''
+
+import grp
+import os
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.sys_info import get_platform_subclass
+
+
+class Group(object):
+ """
+ This is a generic Group manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to override the following action methods:-
+ - group_del()
+ - group_add()
+ - group_mod()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None # type: str | None
+ GROUPFILE = '/etc/group'
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Group)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.gid = module.params['gid']
+ self.system = module.params['system']
+ self.local = module.params['local']
+ self.non_unique = module.params['non_unique']
+
+ def execute_command(self, cmd):
+ return self.module.run_command(cmd)
+
+ def group_del(self):
+ if self.local:
+ command_name = 'lgroupdel'
+ else:
+ command_name = 'groupdel'
+ cmd = [self.module.get_bin_path(command_name, True), self.name]
+ return self.execute_command(cmd)
+
+ def _local_check_gid_exists(self):
+ if self.gid:
+ for gr in grp.getgrall():
+ if self.gid == gr.gr_gid and self.name != gr.gr_name:
+ self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name))
+
+ def group_add(self, **kwargs):
+ if self.local:
+ command_name = 'lgroupadd'
+ self._local_check_gid_exists()
+ else:
+ command_name = 'groupadd'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ elif key == 'system' and kwargs[key] is True:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ if self.local:
+ command_name = 'lgroupmod'
+ self._local_check_gid_exists()
+ else:
+ command_name = 'groupmod'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ info = self.group_info()
+ for key in kwargs:
+ if key == 'gid':
+ if kwargs[key] is not None and info[2] != int(kwargs[key]):
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_exists(self):
+ # The grp module does not distinguish between local and directory accounts.
+ # Its output cannot be used to determine whether or not a group exists locally.
+ # It returns True if the group exists locally or in the directory, so instead
+ # look in the local GROUP file for an existing account.
+ if self.local:
+ if not os.path.exists(self.GROUPFILE):
+ self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE))
+
+ exists = False
+ name_test = '{0}:'.format(self.name)
+ with open(self.GROUPFILE, 'rb') as f:
+ reversed_lines = f.readlines()[::-1]
+ for line in reversed_lines:
+ if line.startswith(to_bytes(name_test)):
+ exists = True
+ break
+
+ if not exists:
+ self.module.warn(
+ "'local: true' specified and group was not found in {file}. "
+ "The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE))
+
+ return exists
+
+ else:
+ try:
+ if grp.getgrnam(self.name):
+ return True
+ except KeyError:
+ return False
+
+ def group_info(self):
+ if not self.group_exists():
+ return False
+ try:
+ info = list(grp.getgrnam(self.name))
+ except KeyError:
+ return False
+ return info
+
+
+# ===========================================
+
+class SunOS(Group):
+ """
+ This is a SunOS Group manipulation class. Solaris doesn't have
+ the 'system' group concept.
+
+ This overrides the following methods from the generic class:-
+ - group_add()
+ """
+
+ platform = 'SunOS'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class AIX(Group):
+ """
+ This is an AIX Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'AIX'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('rmgroup', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('mkgroup', True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('id=' + str(kwargs[key]))
+ elif key == 'system' and kwargs[key] is True:
+ cmd.append('-a')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('chgroup', True)]
+ info = self.group_info()
+ for key in kwargs:
+ if key == 'gid':
+ if kwargs[key] is not None and info[2] != int(kwargs[key]):
+ cmd.append('id=' + str(kwargs[key]))
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class FreeBsdGroup(Group):
+ """
+ This is a FreeBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
+ info = self.group_info()
+ cmd_len = len(cmd)
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ # modify the group if cmd will do anything
+ if cmd_len != len(cmd):
+ if self.module.check_mode:
+ return (0, '', '')
+ return self.execute_command(cmd)
+ return (None, '', '')
+
+
+class DragonFlyBsdGroup(FreeBsdGroup):
+ """
+ This is a DragonFlyBSD Group manipulation class.
+ It inherits all behaviors from FreeBsdGroup class.
+ """
+
+ platform = 'DragonFly'
+
+
+# ===========================================
+
+class DarwinGroup(Group):
+ """
+ This is a macOS (Darwin) Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+
+ Group manipulation is done using dseditgroup(1).
+ """
+
+ platform = 'Darwin'
+ distribution = None
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'create']
+ if self.gid is not None:
+ cmd += ['-i', str(self.gid)]
+ elif 'system' in kwargs and kwargs['system'] is True:
+ gid = self.get_lowest_available_system_gid()
+ if gid is not False:
+ self.gid = str(gid)
+ cmd += ['-i', str(self.gid)]
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'delete']
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+
+ def group_mod(self, gid=None):
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'edit']
+ if gid is not None:
+ cmd += ['-i', str(gid)]
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+ return (None, '', '')
+
+ def get_lowest_available_system_gid(self):
+ # check for lowest available system gid (< 500)
+ try:
+ cmd = [self.module.get_bin_path('dscl', True)]
+ cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID']
+ (rc, out, err) = self.execute_command(cmd)
+ lines = out.splitlines()
+ highest = 0
+ for group_info in lines:
+ parts = group_info.split(' ')
+ if len(parts) > 1:
+ gid = int(parts[-1])
+ if gid > highest and gid < 500:
+ highest = gid
+ if highest == 0 or highest == 499:
+ return False
+ return (highest + 1)
+ except Exception:
+ return False
+
+
+class OpenBsdGroup(Group):
+ """
+ This is an OpenBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('groupdel', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupmod', True)]
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class NetBsdGroup(Group):
+ """
+ This is a NetBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('groupdel', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupmod', True)]
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+
+class BusyBoxGroup(Group):
+ """
+ BusyBox group manipulation class for systems that have addgroup and delgroup.
+
+ It overrides the following methods:
+ - group_add()
+ - group_del()
+ - group_mod()
+ """
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('addgroup', True)]
+ if self.gid is not None:
+ cmd.extend(['-g', str(self.gid)])
+
+ if self.system:
+ cmd.append('-S')
+
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('delgroup', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ # Since there is no groupmod command, modify /etc/group directly
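+ # An /etc/group entry has the form: name:password:GID:member-list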
+ info = self.group_info()
+ if self.gid is not None and self.gid != info[2]:
+ with open('/etc/group', 'rb') as f:
+ b_groups = f.read()
+
+ b_name = to_bytes(self.name)
+ b_current_group_string = b'%s:x:%d:' % (b_name, info[2])
+ b_new_group_string = b'%s:x:%d:' % (b_name, self.gid)
+
+ if b':%d:' % self.gid in b_groups:
+ self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid))
+
+ if self.module.check_mode:
+ return 0, '', ''
+ b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string)
+ with open('/etc/group', 'wb') as f:
+ f.write(b_new_groups)
+ return 0, '', ''
+
+ return None, '', ''
+
+
+class AlpineGroup(BusyBoxGroup):
+
+ platform = 'Linux'
+ distribution = 'Alpine'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ gid=dict(type='int'),
+ system=dict(type='bool', default=False),
+ local=dict(type='bool', default=False),
+ non_unique=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ['non_unique', True, ['gid']],
+ ],
+ )
+
+ group = Group(module)
+
+ module.debug('Group instantiated - platform %s' % group.platform)
+ if group.distribution:
+ module.debug('Group instantiated - distribution %s' % group.distribution)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = group.name
+ result['state'] = group.state
+
+ if group.state == 'absent':
+
+ if group.group_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = group.group_del()
+ if rc != 0:
+ module.fail_json(name=group.name, msg=err)
+
+ elif group.state == 'present':
+
+ if not group.group_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
+ else:
+ (rc, out, err) = group.group_mod(gid=group.gid)
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=group.name, msg=err)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ if group.group_exists():
+ info = group.group_info()
+ result['system'] = group.system
+ result['gid'] = info[2]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/group_by.py b/lib/ansible/modules/group_by.py
new file mode 100644
index 0000000..ef641f2
--- /dev/null
+++ b/lib/ansible/modules/group_by.py
@@ -0,0 +1,89 @@
+# -*- mode: python -*-
+
+# Copyright: (c) 2012, Jeroen Hoekx (@jhoekx)
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: group_by
+short_description: Create Ansible groups based on facts
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+description:
+- Use facts to create ad-hoc groups that can be used later in a playbook.
+- This module is also supported for Windows targets.
+version_added: "0.9"
+options:
+ key:
+ description:
+ - The variables whose values will be used as groups.
+ type: str
+ required: true
+ parents:
+ description:
+ - The list of parent groups.
+ type: list
+ elements: str
+ default: all
+ version_added: "2.4"
+attributes:
+ action:
+ support: full
+ become:
+ support: none
+ bypass_host_loop:
+ support: full
+ bypass_task_loop:
+ support: none
+ check_mode:
+ details: While this makes no changes to target systems, the 'in memory' inventory will still be altered
+ support: partial
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ connection:
+ support: none
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+- Spaces in group names are converted to dashes '-'.
+- Though this module does not change the remote host,
+ we do provide 'changed' status as it can be useful
+ for those trying to track inventory changes.
+seealso:
+- module: ansible.builtin.add_host
+author:
+- Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r'''
+- name: Create groups based on the machine architecture
+ ansible.builtin.group_by:
+ key: machine_{{ ansible_machine }}
+
+- name: Create groups like 'virt_kvm_host'
+ ansible.builtin.group_by:
+ key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
+
+- name: Create nested groups
+ ansible.builtin.group_by:
+ key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
+ parents:
+ - el{{ ansible_distribution_major_version }}
+
+- name: Add all active hosts to a static group
+ ansible.builtin.group_by:
+ key: done
+'''
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
new file mode 100644
index 0000000..f6284df
--- /dev/null
+++ b/lib/ansible/modules/hostname.py
@@ -0,0 +1,908 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hostname
+author:
+ - Adrian Likins (@alikins)
+ - Hideki Saito (@saito-hideki)
+version_added: "1.4"
+short_description: Manage hostname
+requirements: [ hostname ]
+description:
+ - Set the system's hostname. Supports most OSs/Distributions including those using C(systemd).
+ - Windows, HP-UX, and AIX are not currently supported.
+notes:
+ - This module does B(NOT) modify C(/etc/hosts). You need to modify it yourself using other modules such as M(ansible.builtin.template)
+ or M(ansible.builtin.replace).
+ - On macOS, this module uses C(scutil) to set C(HostName), C(ComputerName), and C(LocalHostName). Since C(LocalHostName)
+ cannot contain spaces or most special characters, this module will replace characters when setting C(LocalHostName).
+options:
+ name:
+ description:
+ - Name of the host.
+ - If the value is a fully qualified domain name that does not resolve from the given host,
+ this will cause the module to hang for a few seconds while waiting for the name resolution attempt to timeout.
+ type: str
+ required: true
+ use:
+ description:
+ - Which strategy to use to update the hostname.
+ - If not set, we try to autodetect, but this can be problematic, particularly with containers, as they can present misleading information.
+ - Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'.
+ choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
+ type: str
+ version_added: '2.9'
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ facts:
+ support: full
+ platform:
+ platforms: posix
+'''
+
+EXAMPLES = '''
+- name: Set a hostname
+ ansible.builtin.hostname:
+ name: web01
+
+- name: Set a hostname specifying strategy
+ ansible.builtin.hostname:
+ name: web01
+ use: systemd
+'''
+
+import os
+import platform
+import socket
+import traceback
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.basic import (
+ AnsibleModule,
+ get_distribution,
+ get_distribution_version,
+)
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils.facts.utils import get_file_lines, get_file_content
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six import PY3, text_type
+
+STRATS = {
+ 'alpine': 'Alpine',
+ 'debian': 'Systemd',
+ 'freebsd': 'FreeBSD',
+ 'generic': 'Base',
+ 'macos': 'Darwin',
+ 'macosx': 'Darwin',
+ 'darwin': 'Darwin',
+ 'openbsd': 'OpenBSD',
+ 'openrc': 'OpenRC',
+ 'redhat': 'RedHat',
+ 'sles': 'SLES',
+ 'solaris': 'Solaris',
+ 'systemd': 'Systemd',
+}
+
+
+class BaseStrategy(object):
+ def __init__(self, module):
+ self.module = module
+ self.changed = False
+
+ def update_current_and_permanent_hostname(self):
+ self.update_current_hostname()
+ self.update_permanent_hostname()
+ return self.changed
+
+ def update_current_hostname(self):
+ name = self.module.params['name']
+ current_name = self.get_current_hostname()
+ if current_name != name:
+ if not self.module.check_mode:
+ self.set_current_hostname(name)
+ self.changed = True
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+ permanent_name = self.get_permanent_hostname()
+ if permanent_name != name:
+ if not self.module.check_mode:
+ self.set_permanent_hostname(name)
+ self.changed = True
+
+ def get_current_hostname(self):
+ return self.get_permanent_hostname()
+
+ def set_current_hostname(self, name):
+ pass
+
+ def get_permanent_hostname(self):
+ raise NotImplementedError
+
+ def set_permanent_hostname(self, name):
+ raise NotImplementedError
+
+
+class UnimplementedStrategy(BaseStrategy):
+ def update_current_and_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def update_current_hostname(self):
+ self.unimplemented_error()
+
+ def update_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def get_current_hostname(self):
+ self.unimplemented_error()
+
+ def set_current_hostname(self, name):
+ self.unimplemented_error()
+
+ def get_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def set_permanent_hostname(self, name):
+ self.unimplemented_error()
+
+ def unimplemented_error(self):
+ system = platform.system()
+ distribution = get_distribution()
+ if distribution is not None:
+ msg_platform = '%s (%s)' % (system, distribution)
+ else:
+ msg_platform = system
+ self.module.fail_json(
+ msg='hostname module cannot be used on platform %s' % msg_platform)
+
+
+class CommandStrategy(BaseStrategy):
+ COMMAND = 'hostname'
+
+ def __init__(self, module):
+ super(CommandStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def get_current_hostname(self):
+ cmd = [self.hostname_cmd]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ return 'UNKNOWN'
+
+ def set_permanent_hostname(self, name):
+ pass
+
+
+class FileStrategy(BaseStrategy):
+ FILE = '/etc/hostname'
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.FILE):
+ return ''
+
+ try:
+ return get_file_content(self.FILE, default='', strip=True)
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ with open(self.FILE, 'w+') as f:
+ f.write("%s\n" % name)
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+class SLESStrategy(FileStrategy):
+ """
+ This is a SLES Hostname strategy class - it edits the
+ /etc/HOSTNAME file.
+ """
+ FILE = '/etc/HOSTNAME'
+
+
+class RedHatStrategy(BaseStrategy):
+ """
+ This is a Redhat Hostname strategy class - it edits the
+ /etc/sysconfig/network file.
+ """
+ NETWORK_FILE = '/etc/sysconfig/network'
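+ # The file is expected to carry an entry of the form: HOSTNAME=web01.example.com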
+
+ def get_permanent_hostname(self):
+ try:
+ for line in get_file_lines(self.NETWORK_FILE):
+ line = to_native(line).strip()
+ if line.startswith('HOSTNAME'):
+ k, v = line.split('=')
+ return v.strip()
+ self.module.fail_json(
+ "Unable to locate HOSTNAME entry in %s" % self.NETWORK_FILE
+ )
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ lines = []
+ found = False
+ content = get_file_content(self.NETWORK_FILE, strip=False) or ""
+ for line in content.splitlines(True):
+ line = to_native(line)
+ if line.strip().startswith('HOSTNAME'):
+ lines.append("HOSTNAME=%s\n" % name)
+ found = True
+ else:
+ lines.append(line)
+ if not found:
+ lines.append("HOSTNAME=%s\n" % name)
+ with open(self.NETWORK_FILE, 'w+') as f:
+ f.writelines(lines)
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+class AlpineStrategy(FileStrategy):
+ """
+ This is an Alpine Linux Hostname manipulation strategy class - it edits
+ the /etc/hostname file, then runs hostname -F /etc/hostname.
+ """
+
+ FILE = '/etc/hostname'
+ COMMAND = 'hostname'
+
+ def set_current_hostname(self, name):
+ super(AlpineStrategy, self).set_current_hostname(name)
+ hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ cmd = [hostname_cmd, '-F', self.FILE]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class SystemdStrategy(BaseStrategy):
+ """
+ This is a Systemd hostname manipulation strategy class - it uses
+ the hostnamectl command.
+ """
+
+ COMMAND = "hostnamectl"
+
+ def __init__(self, module):
+ super(SystemdStrategy, self).__init__(module)
+ self.hostnamectl_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def get_current_hostname(self):
+ cmd = [self.hostnamectl_cmd, '--transient', 'status']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ if len(name) > 64:
+ self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
+ cmd = [self.hostnamectl_cmd, '--transient', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ cmd = [self.hostnamectl_cmd, '--static', 'status']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ if len(name) > 64:
+ self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
+ cmd = [self.hostnamectl_cmd, '--pretty', '--static', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def update_current_and_permanent_hostname(self):
+ # Must set the permanent hostname prior to current to avoid NetworkManager complaints
+ # about setting the hostname outside of NetworkManager
+ self.update_permanent_hostname()
+ self.update_current_hostname()
+ return self.changed
+
+
+class OpenRCStrategy(BaseStrategy):
+ """
+ This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
+ the /etc/conf.d/hostname file.
+ """
+
+ FILE = '/etc/conf.d/hostname'
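+ # The file is expected to carry an entry of the form: hostname="web01"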
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.FILE):
+ return ''
+
+ try:
+ for line in get_file_lines(self.FILE):
+ line = line.strip()
+ if line.startswith('hostname='):
+ return line[len('hostname='):].strip('"')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ lines = [x.strip() for x in get_file_lines(self.FILE)]
+
+ for i, line in enumerate(lines):
+ if line.startswith('hostname='):
+ lines[i] = 'hostname="%s"' % name
+ break
+
+ with open(self.FILE, 'w') as f:
+ f.write('\n'.join(lines) + '\n')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+class OpenBSDStrategy(FileStrategy):
+ """
+ This is an OpenBSD family Hostname manipulation strategy class - it edits
+ the /etc/myname file.
+ """
+
+ FILE = '/etc/myname'
+
+
+class SolarisStrategy(BaseStrategy):
+ """
+ This is a Solaris 11 or later Hostname manipulation strategy class - it
+ executes the hostname command.
+ """
+
+ COMMAND = "hostname"
+
+ def __init__(self, module):
+ super(SolarisStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def set_current_hostname(self, name):
+ cmd_option = '-t'
+ cmd = [self.hostname_cmd, cmd_option, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ fmri = 'svc:/system/identity:node'
+ pattern = 'config/nodename'
+ cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
+ rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class FreeBSDStrategy(BaseStrategy):
+ """
+ This is a FreeBSD hostname manipulation strategy class - it edits
+ the /etc/rc.conf.d/hostname file.
+ """
+
+ FILE = '/etc/rc.conf.d/hostname'
+ COMMAND = "hostname"
+
+ def __init__(self, module):
+ super(FreeBSDStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def get_current_hostname(self):
+ cmd = [self.hostname_cmd]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.FILE):
+ return ''
+
+ try:
+ for line in get_file_lines(self.FILE):
+ line = line.strip()
+ if line.startswith('hostname='):
+ return line[len('hostname='):].strip('"')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ if os.path.isfile(self.FILE):
+ lines = [x.strip() for x in get_file_lines(self.FILE)]
+
+ for i, line in enumerate(lines):
+ if line.startswith('hostname='):
+ lines[i] = 'hostname="%s"' % name
+ break
+ else:
+ lines = ['hostname="%s"' % name]
+
+ with open(self.FILE, 'w') as f:
+ f.write('\n'.join(lines) + '\n')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+class DarwinStrategy(BaseStrategy):
+ """
+ This is a macOS hostname manipulation strategy class. It uses
+ /usr/sbin/scutil to set ComputerName, HostName, and LocalHostName.
+
+ HostName corresponds to what most platforms consider to be hostname.
+ It controls the name used on the command line and SSH.
+
+ However, macOS also has LocalHostName and ComputerName settings.
+ LocalHostName controls the Bonjour/ZeroConf name, used by services
+ like AirDrop. This class implements a method, _scrub_hostname(), that mimics
+ the transformations macOS makes on hostnames when entered in the Sharing
+ preference pane. It replaces spaces with dashes and removes all special
+ characters.
+
+ ComputerName is the name used for user-facing GUI services, like the
+ System Preferences/Sharing pane and when users connect to the Mac over the network.
+ """
+
+ def __init__(self, module):
+ super(DarwinStrategy, self).__init__(module)
+
+ self.scutil = self.module.get_bin_path('scutil', True)
+ self.name_types = ('HostName', 'ComputerName', 'LocalHostName')
+ self.scrubbed_name = self._scrub_hostname(self.module.params['name'])
+
+ def _make_translation(self, replace_chars, replacement_chars, delete_chars):
+ if PY3:
+ return str.maketrans(replace_chars, replacement_chars, delete_chars)
+
+ if not isinstance(replace_chars, text_type) or not isinstance(replacement_chars, text_type):
+ raise ValueError('replace_chars and replacement_chars must both be strings')
+ if len(replace_chars) != len(replacement_chars):
+ raise ValueError('replacement_chars must be the same length as replace_chars')
+
+ table = dict(zip((ord(c) for c in replace_chars), replacement_chars))
+ for char in delete_chars:
+ table[ord(char)] = None
+
+ return table
+
+ def _scrub_hostname(self, name):
+ """
+ LocalHostName only accepts valid DNS characters while HostName and ComputerName
+ accept a much wider range of characters. This function aims to mimic how macOS
+ translates a friendly name to the LocalHostName.
+ """
+
+ # Replace all these characters with a single dash
+ name = to_text(name)
+ replace_chars = u'\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
+ delete_chars = u".'"
+ table = self._make_translation(replace_chars, u'-' * len(replace_chars), delete_chars)
+ name = name.translate(table)
+
+ # Replace runs of multiple dashes with a single dash
+ while '-' * 2 in name:
+ name = name.replace('-' * 2, '-')
+
+ name = name.rstrip('-')
+ return name
+
+ def get_current_hostname(self):
+ cmd = [self.scutil, '--get', 'HostName']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0 and 'HostName: not set' not in err:
+ self.module.fail_json(msg="Failed to get current hostname rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ return to_native(out).strip()
+
+ def get_permanent_hostname(self):
+ cmd = [self.scutil, '--get', 'ComputerName']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to get permanent hostname rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ for hostname_type in self.name_types:
+ cmd = [self.scutil, '--set', hostname_type]
+ if hostname_type == 'LocalHostName':
+ cmd.append(to_native(self.scrubbed_name))
+ else:
+ cmd.append(to_native(name))
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to set {3} to '{2}': {0} {1}".format(to_native(out), to_native(err), to_native(name), hostname_type))
+
+ def set_current_hostname(self, name):
+ pass
+
+ def update_current_hostname(self):
+ pass
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+
+ # Get all the current host name values in the order of self.name_types
+ all_names = tuple(self.module.run_command([self.scutil, '--get', name_type])[1].strip() for name_type in self.name_types)
+
+ # Get the expected host name values based on the order in self.name_types
+ expected_names = tuple(self.scrubbed_name if n == 'LocalHostName' else name for n in self.name_types)
+
+ # Ensure all three names are updated
+ if all_names != expected_names:
+ if not self.module.check_mode:
+ self.set_permanent_hostname(name)
+ self.changed = True
+
+
+class Hostname(object):
+ """
+ This is a generic Hostname manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to set different strategy instance to self.strategy.
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None # type: str | None
+ strategy_class = UnimplementedStrategy # type: t.Type[BaseStrategy]
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Hostname)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.use = module.params['use']
+
+ if self.use is not None:
+ strat = globals()['%sStrategy' % STRATS[self.use]]
+ self.strategy = strat(module)
+ elif platform.system() == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
+ # This is Linux and systemd is active
+ self.strategy = SystemdStrategy(module)
+ else:
+ self.strategy = self.strategy_class(module)
+
+ def update_current_and_permanent_hostname(self):
+ return self.strategy.update_current_and_permanent_hostname()
+
+ def get_current_hostname(self):
+ return self.strategy.get_current_hostname()
+
+ def set_current_hostname(self, name):
+ self.strategy.set_current_hostname(name)
+
+ def get_permanent_hostname(self):
+ return self.strategy.get_permanent_hostname()
+
+ def set_permanent_hostname(self, name):
+ self.strategy.set_permanent_hostname(name)
+
+
+class SLESHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Sles'
+ try:
+ distribution_version = get_distribution_version()
+ # cast to float may raise ValueError on non-SLES; we use float for a little more safety over int
+ if distribution_version and 10 <= float(distribution_version) <= 12:
+ strategy_class = SLESStrategy # type: t.Type[BaseStrategy]
+ else:
+ raise ValueError()
+ except ValueError:
+ strategy_class = UnimplementedStrategy
+
+
+class RHELHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Redhat'
+ strategy_class = RedHatStrategy
+
+
+class CentOSHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Centos'
+ strategy_class = RedHatStrategy
+
+
+class AnolisOSHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Anolis'
+ strategy_class = RedHatStrategy
+
+
+class CloudlinuxserverHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cloudlinuxserver'
+ strategy_class = RedHatStrategy
+
+
+class CloudlinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cloudlinux'
+ strategy_class = RedHatStrategy
+
+
+class AlinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alinux'
+ strategy_class = RedHatStrategy
+
+
+class ScientificHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Scientific'
+ strategy_class = RedHatStrategy
+
+
+class OracleLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Oracle'
+ strategy_class = RedHatStrategy
+
+
+class VirtuozzoLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Virtuozzo'
+ strategy_class = RedHatStrategy
+
+
+class AmazonLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Amazon'
+ strategy_class = RedHatStrategy
+
+
+class DebianHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Debian'
+ strategy_class = FileStrategy
+
+
+class KylinHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Kylin'
+ strategy_class = FileStrategy
+
+
+class CumulusHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cumulus-linux'
+ strategy_class = FileStrategy
+
+
+class KaliHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Kali'
+ strategy_class = FileStrategy
+
+
+class ParrotHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Parrot'
+ strategy_class = FileStrategy
+
+
+class UbuntuHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Ubuntu'
+ strategy_class = FileStrategy
+
+
+class LinuxmintHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Linuxmint'
+ strategy_class = FileStrategy
+
+
+class LinaroHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Linaro'
+ strategy_class = FileStrategy
+
+
+class DevuanHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Devuan'
+ strategy_class = FileStrategy
+
+
+class RaspbianHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Raspbian'
+ strategy_class = FileStrategy
+
+
+class UosHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Uos'
+ strategy_class = FileStrategy
+
+
+class DeepinHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Deepin'
+ strategy_class = FileStrategy
+
+
+class GentooHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Gentoo'
+ strategy_class = OpenRCStrategy
+
+
+class ALTLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Altlinux'
+ strategy_class = RedHatStrategy
+
+
+class AlpineLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alpine'
+ strategy_class = AlpineStrategy
+
+
+class OpenBSDHostname(Hostname):
+ platform = 'OpenBSD'
+ distribution = None
+ strategy_class = OpenBSDStrategy
+
+
+class SolarisHostname(Hostname):
+ platform = 'SunOS'
+ distribution = None
+ strategy_class = SolarisStrategy
+
+
+class FreeBSDHostname(Hostname):
+ platform = 'FreeBSD'
+ distribution = None
+ strategy_class = FreeBSDStrategy
+
+
+class NetBSDHostname(Hostname):
+ platform = 'NetBSD'
+ distribution = None
+ strategy_class = FreeBSDStrategy
+
+
+class NeonHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Neon'
+ strategy_class = FileStrategy
+
+
+class DarwinHostname(Hostname):
+ platform = 'Darwin'
+ distribution = None
+ strategy_class = DarwinStrategy
+
+
+class VoidLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Void'
+ strategy_class = FileStrategy
+
+
+class PopHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Pop'
+ strategy_class = FileStrategy
+
+
+class EurolinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Eurolinux'
+ strategy_class = RedHatStrategy
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ use=dict(type='str', choices=list(STRATS.keys()))
+ ),
+ supports_check_mode=True,
+ )
+
+ hostname = Hostname(module)
+ name = module.params['name']
+
+ current_hostname = hostname.get_current_hostname()
+ permanent_hostname = hostname.get_permanent_hostname()
+
+ changed = hostname.update_current_and_permanent_hostname()
+
+    if name != current_hostname:
+        name_before = current_hostname
+    else:
+        name_before = permanent_hostname
+
+ # NOTE: socket.getfqdn() calls gethostbyaddr(socket.gethostname()), which can be
+ # slow to return if the name does not resolve correctly.
+ kw = dict(changed=changed, name=name,
+ ansible_facts=dict(ansible_hostname=name.split('.')[0],
+ ansible_nodename=name,
+ ansible_fqdn=socket.getfqdn(),
+ ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
+
+ if changed:
+ kw['diff'] = {'after': 'hostname = ' + name + '\n',
+ 'before': 'hostname = ' + name_before + '\n'}
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/import_playbook.py b/lib/ansible/modules/import_playbook.py
new file mode 100644
index 0000000..9adaebf
--- /dev/null
+++ b/lib/ansible/modules/import_playbook.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_playbook
+short_description: Import a playbook
+description:
+ - Includes a file with a list of plays to be executed.
+ - Files with a list of plays can only be included at the top level.
+ - You cannot use this action inside a play.
+version_added: "2.4"
+options:
+ free-form:
+ description:
+ - The name of the imported playbook is specified directly without any other option.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.import
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+seealso:
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ tasks:
+ - ansible.builtin.debug:
+ msg: play1
+
+- name: Include a play after another play
+ ansible.builtin.import_playbook: otherplays.yaml
+
+- name: Set variables on an imported playbook
+ ansible.builtin.import_playbook: otherplays.yml
+ vars:
+ service: httpd
+
+- name: Include a playbook from a collection
+ ansible.builtin.import_playbook: my_namespace.my_collection.my_playbook
+
+- name: This DOES NOT WORK
+ hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: This fails because I'm inside a play already
+ ansible.builtin.import_playbook: stuff.yaml
+'''
+
+RETURN = r'''
+# This module does not return anything except plays to execute.
+'''
diff --git a/lib/ansible/modules/import_role.py b/lib/ansible/modules/import_role.py
new file mode 100644
index 0000000..2f118f2
--- /dev/null
+++ b/lib/ansible/modules/import_role.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_role
+short_description: Import a role into a play
+description:
+ - Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
+ between other tasks of the play.
+ - Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
+ you want the opposite behavior, use M(ansible.builtin.include_role) instead.
+ - Does not work in handlers.
+version_added: '2.4'
+options:
+ name:
+ description:
+ - The name of the role to be executed.
+ type: str
+ required: true
+ tasks_from:
+ description:
+ - File to load from a role's C(tasks/) directory.
+ type: str
+ default: main
+ vars_from:
+ description:
+ - File to load from a role's C(vars/) directory.
+ type: str
+ default: main
+ defaults_from:
+ description:
+ - File to load from a role's C(defaults/) directory.
+ type: str
+ default: main
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ type: bool
+ default: yes
+ handlers_from:
+ description:
+ - File to load from a role's C(handlers/) directory.
+ type: str
+ default: main
+ version_added: '2.8'
+ rolespec_validate:
+ description:
+ - Perform role argument spec validation if an argument spec is defined.
+ type: bool
+ default: yes
+ version_added: '2.11'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.import
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+notes:
+ - Handlers are made available to the whole play.
+  - Since Ansible 2.7, variables defined in C(vars) and C(defaults) for the role are exposed to the play at playbook parsing time.
+ Due to this, these variables will be accessible to roles and tasks executed before the location of the
+ M(ansible.builtin.import_role) task.
+  - Unlike M(ansible.builtin.include_role), variable exposure is not configurable, and variables will always be exposed.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - ansible.builtin.import_role:
+ name: myrole
+
+ - name: Run tasks/other.yaml instead of 'main'
+ ansible.builtin.import_role:
+ name: myrole
+ tasks_from: other
+
+ - name: Pass variables to role
+ ansible.builtin.import_role:
+ name: myrole
+ vars:
+ rolevar1: value from task
+
+ - name: Apply condition to each task in role
+ ansible.builtin.import_role:
+ name: myrole
+ when: not idontwanttorun
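+
+    # A hedged addition to the examples: 'other_handlers' is an illustrative
+    # file name; this shows the handlers_from and rolespec_validate options
+    # documented above.
+    - name: Load handlers from a non-default file without role argument spec validation
+      ansible.builtin.import_role:
+        name: myrole
+        handlers_from: other_handlers
+        rolespec_validate: false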
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/import_tasks.py b/lib/ansible/modules/import_tasks.py
new file mode 100644
index 0000000..e578620
--- /dev/null
+++ b/lib/ansible/modules/import_tasks.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_tasks
+short_description: Import a task list
+description:
+ - Imports a list of tasks to be added to the current playbook for subsequent execution.
+version_added: "2.4"
+options:
+ free-form:
+ description:
+ - |
+ Specifies the name of the imported file directly without any other option C(- import_tasks: file.yml).
+ - Most keywords, including loops and conditionals, only apply to the imported tasks, not to this statement itself.
+ - If you need any of those to apply, use M(ansible.builtin.include_tasks) instead.
+ file:
+ description:
+ - Specifies the name of the file that lists tasks to add to the current playbook.
+ type: str
+ version_added: '2.7'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.import
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+notes:
+  - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play
+ ansible.builtin.import_tasks:
+ file: stuff.yaml
+
+ - ansible.builtin.debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Apply conditional to all imported tasks
+ ansible.builtin.import_tasks: stuff.yaml
+ when: hostvar is defined
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_role.py b/lib/ansible/modules/include_role.py
new file mode 100644
index 0000000..ea7c61e
--- /dev/null
+++ b/lib/ansible/modules/include_role.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include_role
+short_description: Load and execute a role
+description:
+ - Dynamically loads and executes a specified role as a task.
+ - May be used only where Ansible tasks are allowed - inside C(pre_tasks), C(tasks), or C(post_tasks) play objects, or as a task inside a role.
+ - Task-level keywords, loops, and conditionals apply only to the C(include_role) statement itself.
+ - To apply keywords to the tasks within the role, pass them using the C(apply) option or use M(ansible.builtin.import_role) instead.
+ - Ignores some keywords, like C(until) and C(retries).
+ - This module is also supported for Windows targets.
+ - Does not work in handlers.
+version_added: "2.2"
+options:
+ apply:
+ description:
+ - Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to all tasks within the included role.
+ version_added: '2.7'
+ name:
+ description:
+ - The name of the role to be executed.
+ type: str
+ required: True
+ tasks_from:
+ description:
+ - File to load from a role's C(tasks/) directory.
+ type: str
+ default: main
+ vars_from:
+ description:
+ - File to load from a role's C(vars/) directory.
+ type: str
+ default: main
+ defaults_from:
+ description:
+ - File to load from a role's C(defaults/) directory.
+ type: str
+ default: main
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ type: bool
+ default: yes
+ public:
+ description:
+ - This option dictates whether the role's C(vars) and C(defaults) are exposed to the play. If set to C(true)
+ the variables will be available to tasks following the C(include_role) task. This functionality differs from
+ standard variable exposure for roles listed under the C(roles) header or C(import_role) as they are exposed
+ to the play at playbook parsing time, and available to earlier roles and tasks as well.
+ type: bool
+ default: no
+ version_added: '2.7'
+ handlers_from:
+ description:
+ - File to load from a role's C(handlers/) directory.
+ type: str
+ default: main
+ version_added: '2.8'
+ rolespec_validate:
+ description:
+ - Perform role argument spec validation if an argument spec is defined.
+ type: bool
+ default: yes
+ version_added: '2.11'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.include
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+notes:
+  - Handlers are made available to the whole play.
+  - After Ansible 2.4, you can use M(ansible.builtin.import_role) for C(static) behaviour and this action for the C(dynamic) one.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- ansible.builtin.include_role:
+ name: myrole
+
+- name: Run tasks/other.yaml instead of 'main'
+ ansible.builtin.include_role:
+ name: myrole
+ tasks_from: other
+
+- name: Pass variables to role
+ ansible.builtin.include_role:
+ name: myrole
+ vars:
+ rolevar1: value from task
+
+- name: Use role in loop
+ ansible.builtin.include_role:
+ name: '{{ roleinputvar }}'
+ loop:
+ - '{{ roleinput1 }}'
+ - '{{ roleinput2 }}'
+ loop_control:
+ loop_var: roleinputvar
+
+- name: Conditional role
+ ansible.builtin.include_role:
+ name: myrole
+ when: not idontwanttorun
+
+- name: Apply tags to tasks within included file
+ ansible.builtin.include_role:
+ name: install
+ apply:
+ tags:
+ - install
+ tags:
+ - always
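+
+# A hedged addition to the examples: shows the public option documented above,
+# which exposes the role's vars and defaults to the rest of the play.
+- name: Expose the role's vars and defaults to later tasks
+  ansible.builtin.include_role:
+    name: myrole
+    public: true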
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_tasks.py b/lib/ansible/modules/include_tasks.py
new file mode 100644
index 0000000..ff5d62a
--- /dev/null
+++ b/lib/ansible/modules/include_tasks.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include_tasks
+short_description: Dynamically include a task list
+description:
+ - Includes a file with a list of tasks to be executed in the current playbook.
+version_added: '2.4'
+options:
+ file:
+ description:
+ - Specifies the name of the file that lists tasks to add to the current playbook.
+ type: str
+ version_added: '2.7'
+ apply:
+ description:
+ - Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to the tasks within the include.
+ type: str
+ version_added: '2.7'
+ free-form:
+ description:
+ - |
+ Specifies the name of the imported file directly without any other option C(- include_tasks: file.yml).
+ - Is the equivalent of specifying an argument for the I(file) parameter.
+      - Most keywords, including C(loop), C(with_items), and conditionals, apply to this statement itself, unlike M(ansible.builtin.import_tasks).
+ - The do-until loop is not supported.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.include
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play
+ ansible.builtin.include_tasks:
+ file: stuff.yaml
+
+ - ansible.builtin.debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play only if the condition is true
+ ansible.builtin.include_tasks: "{{ hostvar }}.yaml"
+ when: hostvar is defined
+
+- name: Apply tags to tasks within included file
+ ansible.builtin.include_tasks:
+ file: install.yml
+ apply:
+ tags:
+ - install
+ tags:
+ - always
+
+- name: Apply tags to tasks within included file when using free-form
+ ansible.builtin.include_tasks: install.yml
+ args:
+ apply:
+ tags:
+ - install
+ tags:
+ - always
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_vars.py b/lib/ansible/modules/include_vars.py
new file mode 100644
index 0000000..f0aad94
--- /dev/null
+++ b/lib/ansible/modules/include_vars.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Allen Sanabria (@linuxdynasty)
+module: include_vars
+short_description: Load variables from files, dynamically within a task
+description:
+ - Loads YAML/JSON variables dynamically from a file or directory, recursively, during task runtime.
+ - If loading a directory, the files are sorted alphabetically before being loaded.
+ - This module is also supported for Windows targets.
+ - To assign included variables to a different host than C(inventory_hostname),
+ use C(delegate_to) and set C(delegate_facts=yes).
+version_added: "1.4"
+options:
+ file:
+ description:
+ - The file name from which variables should be loaded.
+      - If the path is relative, it will look for the file in the vars/ subdirectory of a role or relative to the playbook.
+ type: path
+ version_added: "2.2"
+ dir:
+ description:
+ - The directory name from which the variables should be loaded.
+ - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory.
+ - If the path is relative and not inside a role, it will be parsed relative to the playbook.
+ type: path
+ version_added: "2.2"
+ name:
+ description:
+      - The name of a variable into which to assign the included vars.
+      - If omitted (null), they will be made top-level vars.
+ type: str
+ version_added: "2.2"
+ depth:
+ description:
+      - When using C(dir), this module will, by default, recursively go through each subdirectory and load up the
+        variables. By explicitly setting the depth, this module will only descend that many levels.
+ type: int
+ default: 0
+ version_added: "2.2"
+ files_matching:
+ description:
+ - Limit the files that are loaded within any directory to this regular expression.
+ type: str
+ version_added: "2.2"
+ ignore_files:
+ description:
+ - List of file names to ignore.
+ type: list
+ elements: str
+ version_added: "2.2"
+ extensions:
+ description:
+ - List of file extensions to read when using C(dir).
+ type: list
+ elements: str
+ default: [ json, yaml, yml ]
+ version_added: "2.3"
+ ignore_unknown_extensions:
+ description:
+ - Ignore unknown file extensions within the directory.
+      - This allows users to specify a directory containing vars files that are intermingled with files of other extension types
+        (for example, a directory with a README in it and vars files).
+ type: bool
+ default: no
+ version_added: "2.7"
+ hash_behaviour:
+ description:
+ - If set to C(merge), merges existing hash variables instead of overwriting them.
+ - If omitted C(null), the behavior falls back to the global I(hash_behaviour) configuration.
+ default: null
+ type: str
+ choices: ["replace", "merge"]
+ version_added: "2.12"
+ free-form:
+ description:
+ - This module allows you to specify the 'file' option directly without any other options.
+ - There is no 'free-form' option, this is just an indicator, see example below.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+    details: While the action plugin does some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
+ support: partial
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ check_mode:
+ support: full
+ delegation:
+ details:
+      - while variable assignment can be delegated to a different host, the execution context is always the current inventory_hostname
+ - connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
+ support: partial
+ diff_mode:
+ support: none
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+seealso:
+- module: ansible.builtin.set_fact
+- ref: playbooks_delegation
+ description: More information related to task delegation.
+'''
+
+EXAMPLES = r'''
+- name: Include vars of stuff.yaml into the 'stuff' variable (2.2).
+ ansible.builtin.include_vars:
+ file: stuff.yaml
+ name: stuff
+
+- name: Conditionally decide to load in variables into 'plans' when x is 0, otherwise do not. (2.2)
+ ansible.builtin.include_vars:
+ file: contingency_plan.yaml
+ name: plans
+ when: x == 0
+
+- name: Load a variable file based on the OS type, or a default if not found. Using free-form to specify the file.
+ ansible.builtin.include_vars: "{{ lookup('ansible.builtin.first_found', params) }}"
+ vars:
+ params:
+ files:
+ - '{{ansible_distribution}}.yaml'
+ - '{{ansible_os_family}}.yaml'
+ - default.yaml
+ paths:
+ - 'vars'
+
+- name: Bare include (free-form)
+ ansible.builtin.include_vars: myvars.yaml
+
+- name: Include all .json and .jsn files in vars/all and all nested directories (2.3)
+ ansible.builtin.include_vars:
+ dir: vars/all
+ extensions:
+ - 'json'
+ - 'jsn'
+
+- name: Include all default extension files in vars/all and all nested directories and save the output in test. (2.2)
+ ansible.builtin.include_vars:
+ dir: vars/all
+ name: test
+
+- name: Include default extension files in vars/services (2.2)
+ ansible.builtin.include_vars:
+ dir: vars/services
+ depth: 1
+
+- name: Include only files matching bastion.yaml (2.2)
+ ansible.builtin.include_vars:
+ dir: vars
+ files_matching: bastion.yaml
+
+- name: Include all .yaml files except bastion.yaml (2.3)
+ ansible.builtin.include_vars:
+ dir: vars
+ ignore_files:
+ - 'bastion.yaml'
+ extensions:
+ - 'yaml'
+
+- name: Ignore warnings raised for files with unknown extensions while loading (2.7)
+ ansible.builtin.include_vars:
+ dir: vars
+ ignore_unknown_extensions: True
+ extensions:
+ - ''
+ - 'yaml'
+ - 'yml'
+ - 'json'
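+
+# A hedged addition to the examples: shows the hash_behaviour option documented
+# above (added in 2.12), merging loaded hashes into existing ones instead of
+# replacing them.
+- name: Merge loaded hash variables into existing hashes instead of replacing them (2.12)
+  ansible.builtin.include_vars:
+    file: stuff.yaml
+    hash_behaviour: merge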
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Variables that were included and their values
+ returned: success
+ type: dict
+ sample: {'variable': 'value'}
+ansible_included_var_files:
+ description: A list of files that were successfully included
+ returned: success
+ type: list
+ sample: [ /path/to/file.json, /path/to/file.yaml ]
+ version_added: '2.4'
+'''
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
new file mode 100644
index 0000000..f4dba73
--- /dev/null
+++ b/lib/ansible/modules/iptables.py
@@ -0,0 +1,916 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables
+short_description: Modify iptables rules
+version_added: "2.0"
+author:
+- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+- Sébastien DA ROCHA (@sebastiendarocha)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP packet
+ filter rules in the Linux kernel.
+ - This module does not handle the saving and/or loading of rules, but rather
+ only manipulates the current rules that are present in memory. This is the
+ same as the behaviour of the C(iptables) and C(ip6tables) command which
+ this module uses internally.
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: linux
+notes:
+  - This module just deals with individual rules. If you need advanced
+    chaining of rules, the recommended way is to template the iptables restore
+    file.
+options:
+ table:
+ description:
+ - This option specifies the packet matching table which the command should operate on.
+ - If the kernel is configured with automatic module loading, an attempt will be made
+ to load the appropriate module for that table if it is not already there.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ default: filter
+ state:
+ description:
+ - Whether the rule should be absent or present.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ action:
+ description:
+ - Whether the rule should be appended at the bottom or inserted at the top.
+ - If the rule already exists the chain will not be modified.
+ type: str
+ choices: [ append, insert ]
+ default: append
+ version_added: "2.2"
+ rule_num:
+ description:
+ - Insert the rule as the given rule number.
+ - This works only with C(action=insert).
+ type: str
+ version_added: "2.5"
+ ip_version:
+ description:
+ - Which version of the IP protocol this rule should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ chain:
+ description:
+ - Specify the iptables chain to modify.
+ - This could be a user-defined chain or one of the standard iptables chains, like
+ C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
+ type: str
+ protocol:
+ description:
+ - The protocol of the rule or of the packet to check.
+ - The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(ipv6-icmp) or C(icmpv6),
+ C(esp), C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
+ representing one of these protocols or a different one.
+ - A protocol name from I(/etc/protocols) is also allowed.
+ - A C(!) argument before the protocol inverts the test.
+ - The number zero is equivalent to all.
+ - C(all) will match with all protocols and is taken as default when this option is omitted.
+ type: str
+ source:
+ description:
+ - Source specification.
+ - Address can be either a network name, a hostname, a network IP address
+ (with /mask), or a plain IP address.
+ - Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea.
+ - The mask can be either a network mask or a plain number, specifying
+ the number of 1's at the left side of the network mask. Thus, a mask
+ of 24 is equivalent to 255.255.255.0. A C(!) argument before the
+ address specification inverts the sense of the address.
+ type: str
+ destination:
+ description:
+ - Destination specification.
+ - Address can be either a network name, a hostname, a network IP address
+ (with /mask), or a plain IP address.
+ - Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea.
+ - The mask can be either a network mask or a plain number, specifying
+ the number of 1's at the left side of the network mask. Thus, a mask
+ of 24 is equivalent to 255.255.255.0. A C(!) argument before the
+ address specification inverts the sense of the address.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags specification.
+ - C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
+ type: dict
+ default: {}
+ version_added: "2.4"
+ suboptions:
+ flags:
+ description:
+ - List of flags you want to examine.
+ type: list
+ elements: str
+ flags_set:
+ description:
+ - Flags to be set.
+ type: list
+ elements: str
+ match:
+ description:
+ - Specifies a match to use, that is, an extension module that tests for
+ a specific property.
+ - The set of matches make up the condition under which a target is invoked.
+ - Matches are evaluated first to last if specified as an array and work in short-circuit
+ fashion, i.e. if one extension yields false, evaluation will stop.
+ type: list
+ elements: str
+ default: []
+ jump:
+ description:
+ - This specifies the target of the rule; i.e., what to do if the packet matches it.
+ - The target can be a user-defined chain (other than the one
+ this rule is in), one of the special builtin targets which decide the
+ fate of the packet immediately, or an extension (see EXTENSIONS
+ below).
+ - If this option is omitted in a rule (and the goto parameter
+ is not used), then matching the rule will have no effect on the
+ packet's fate, but the counters on the rule will be incremented.
+ type: str
+ gateway:
+ description:
+ - This specifies the IP address of host to send the cloned packets.
+ - This option is only valid when C(jump) is set to C(TEE).
+ type: str
+ version_added: "2.8"
+ log_prefix:
+ description:
+      - Specifies a log text for the rule. Only makes sense with a LOG jump.
+ type: str
+ version_added: "2.5"
+ log_level:
+ description:
+ - Logging level according to the syslogd-defined priorities.
+ - The value can be strings or numbers from 1-8.
+ - This parameter is only applicable if C(jump) is set to C(LOG).
+ type: str
+ version_added: "2.8"
+ choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
+ goto:
+ description:
+ - This specifies that the processing should continue in a user specified chain.
+ - Unlike the jump argument return will not continue processing in
+ this chain but instead in the chain that called us via jump.
+ type: str
+ in_interface:
+ description:
+ - Name of an interface via which a packet was received (only for packets
+ entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
+ - When the C(!) argument is used before the interface name, the sense is inverted.
+ - If the interface name ends in a C(+), then any interface which begins with
+ this name will match.
+ - If this option is omitted, any interface name will match.
+ type: str
+ out_interface:
+ description:
+ - Name of an interface via which a packet is going to be sent (for
+ packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
+ - When the C(!) argument is used before the interface name, the sense is inverted.
+ - If the interface name ends in a C(+), then any interface which begins
+ with this name will match.
+ - If this option is omitted, any interface name will match.
+ type: str
+ fragment:
+ description:
+ - This means that the rule only refers to second and further fragments
+ of fragmented packets.
+ - Since there is no way to tell the source or destination ports of such
+ a packet (or ICMP type), such a packet will not match any rules which specify them.
+      - When the C(!) argument precedes the fragment argument, the rule will only match head fragments
+        or unfragmented packets.
+ type: str
+ set_counters:
+ description:
+ - This enables the administrator to initialize the packet and byte
+ counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
+ type: str
+ source_port:
+ description:
+ - Source port or port range specification.
+ - This can either be a service name or a port number.
+ - An inclusive range can also be specified, using the format C(first:last).
+ - If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
+ - If the first port is greater than the second one they will be swapped.
+ type: str
+ destination_port:
+ description:
+ - "Destination port or port range specification. This can either be
+ a service name or a port number. An inclusive range can also be
+ specified, using the format first:last. If the first port is omitted,
+ '0' is assumed; if the last is omitted, '65535' is assumed. If the
+ first port is greater than the second one they will be swapped.
+ This is only valid if the rule also specifies one of the following
+ protocols: tcp, udp, dccp or sctp."
+ type: str
+ destination_ports:
+ description:
+ - This specifies multiple destination port numbers or port ranges to match in the multiport module.
+ - It can only be used in conjunction with the protocols tcp, udp, udplite, dccp and sctp.
+ type: list
+ elements: str
+ version_added: "2.11"
+ to_ports:
+ description:
+ - This specifies a destination port or range of ports to use, without
+ this, the destination port is never altered.
+ - This is only valid if the rule also specifies one of the protocol
+ C(tcp), C(udp), C(dccp) or C(sctp).
+ type: str
+ to_destination:
+ description:
+ - This specifies a destination address to use with C(DNAT).
+ - Without this, the destination address is never altered.
+ type: str
+ version_added: "2.1"
+ to_source:
+ description:
+ - This specifies a source address to use with C(SNAT).
+ - Without this, the source address is never altered.
+ type: str
+ version_added: "2.2"
+ syn:
+ description:
+ - This allows matching packets that have the SYN bit set and the ACK
+ and RST bits unset.
+ - When negated, this matches all packets with the RST or the ACK bits set.
+ type: str
+ choices: [ ignore, match, negate ]
+ default: ignore
+ version_added: "2.5"
+ set_dscp_mark:
+ description:
+ - This allows specifying a DSCP mark to be added to packets.
+ It takes either an integer or hex value.
+ - Mutually exclusive with C(set_dscp_mark_class).
+ type: str
+ version_added: "2.1"
+ set_dscp_mark_class:
+ description:
+ - This allows specifying a predefined DiffServ class which will be
+ translated to the corresponding DSCP mark.
+ - Mutually exclusive with C(set_dscp_mark).
+ type: str
+ version_added: "2.1"
+ comment:
+ description:
+ - This specifies a comment that will be added to the rule.
+ type: str
+ ctstate:
+ description:
+ - A list of the connection states to match in the conntrack module.
+ - Possible values are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
+ type: list
+ elements: str
+ default: []
+ src_range:
+ description:
+ - Specifies the source IP range to match in the iprange module.
+ type: str
+ version_added: "2.8"
+ dst_range:
+ description:
+ - Specifies the destination IP range to match in the iprange module.
+ type: str
+ version_added: "2.8"
+ match_set:
+ description:
+ - Specifies a set name which can be defined by ipset.
+ - Must be used together with the match_set_flags parameter.
+ - When the C(!) argument is prepended then it inverts the rule.
+ - Uses the iptables set extension.
+ type: str
+ version_added: "2.11"
+ match_set_flags:
+ description:
+ - Specifies the necessary flags for the match_set parameter.
+ - Must be used together with the match_set parameter.
+ - Uses the iptables set extension.
+ type: str
+ choices: [ "src", "dst", "src,dst", "dst,src" ]
+ version_added: "2.11"
+ limit:
+ description:
+ - Specifies the maximum average number of matches to allow per second.
+ - The number can specify units explicitly, using C(/second), C(/minute),
+ C(/hour) or C(/day), or parts of them (so C(5/second) is the same as
+ C(5/s)).
+ type: str
+ limit_burst:
+ description:
+ - Specifies the maximum burst before the above limit kicks in.
+ type: str
+ version_added: "2.1"
+ uid_owner:
+ description:
+ - Specifies the UID or username to use in match by owner rule.
+      - Since Ansible 2.6, when the C(!) argument is prepended, it inverts
+        the rule to apply instead to all users except the one specified.
+ type: str
+ version_added: "2.1"
+ gid_owner:
+ description:
+ - Specifies the GID or group to use in match by owner rule.
+ type: str
+ version_added: "2.9"
+ reject_with:
+ description:
+ - 'Specifies the error packet type to return while rejecting. It implies
+ "jump: REJECT".'
+ type: str
+ version_added: "2.1"
+ icmp_type:
+ description:
+ - This allows specification of the ICMP type, which can be a numeric
+ ICMP type, type/code pair, or one of the ICMP type names shown by the
+ command 'iptables -p icmp -h'
+ type: str
+ version_added: "2.2"
+ flush:
+ description:
+ - Flushes the specified table and chain of all rules.
+ - If no chain is specified then the entire table is purged.
+ - Ignores all other parameters.
+ type: bool
+ default: false
+ version_added: "2.2"
+ policy:
+ description:
+ - Set the policy for the chain to the given target.
+ - Only built-in chains can have policies.
+ - This parameter requires the C(chain) parameter.
+ - If you specify this parameter, all other parameters will be ignored.
+ - This parameter is used to set default policy for the given C(chain).
+ Do not confuse this with C(jump) parameter.
+ type: str
+ choices: [ ACCEPT, DROP, QUEUE, RETURN ]
+ version_added: "2.2"
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent multiple instances of
+ the program from running concurrently.
+ type: str
+ version_added: "2.10"
+ chain_management:
+ description:
+ - If C(true) and C(state) is C(present), the chain will be created if needed.
+ - If C(true) and C(state) is C(absent), the chain will be deleted if the only
+        other parameters passed are C(chain) and optionally C(table).
+ type: bool
+ default: false
+ version_added: "2.13"
+'''
+
+EXAMPLES = r'''
+- name: Block specific IP
+ ansible.builtin.iptables:
+ chain: INPUT
+ source: 8.8.8.8
+ jump: DROP
+ become: yes
+
+- name: Forward port 80 to 8600
+ ansible.builtin.iptables:
+ table: nat
+ chain: PREROUTING
+ in_interface: eth0
+ protocol: tcp
+ match: tcp
+ destination_port: 80
+ jump: REDIRECT
+ to_ports: 8600
+ comment: Redirect web traffic to port 8600
+ become: yes
+
+- name: Allow related and established connections
+ ansible.builtin.iptables:
+ chain: INPUT
+ ctstate: ESTABLISHED,RELATED
+ jump: ACCEPT
+ become: yes
+
+- name: Allow new incoming SYN packets on TCP port 22 (SSH)
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_port: 22
+ ctstate: NEW
+ syn: match
+ jump: ACCEPT
+ comment: Accept new SSH connections.
+
+- name: Match on IP ranges
+ ansible.builtin.iptables:
+ chain: FORWARD
+ src_range: 192.168.1.100-192.168.1.199
+ dst_range: 10.0.0.1-10.0.0.50
+ jump: ACCEPT
+
+- name: Allow source IPs defined in ipset "admin_hosts" on port 22
+ ansible.builtin.iptables:
+ chain: INPUT
+ match_set: admin_hosts
+ match_set_flags: src
+ destination_port: 22
+    jump: ACCEPT
+
+- name: Tag all outbound tcp packets with DSCP mark 8
+ ansible.builtin.iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark: 8
+ protocol: tcp
+
+- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
+ ansible.builtin.iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark_class: CS1
+ protocol: tcp
+
+# Create the user-defined chain ALLOWLIST
+- iptables:
+ chain: ALLOWLIST
+ chain_management: true
+
+# Delete the user-defined chain ALLOWLIST
+- iptables:
+ chain: ALLOWLIST
+ chain_management: true
+ state: absent
+
+- name: Insert a rule on line 5
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_port: 8080
+ jump: ACCEPT
+ action: insert
+ rule_num: 5
+
+# Think twice before running following task as this may lock target system
+- name: Set the policy for the INPUT chain to DROP
+ ansible.builtin.iptables:
+ chain: INPUT
+ policy: DROP
+
+- name: Reject tcp with tcp-reset
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ reject_with: tcp-reset
+ ip_version: ipv4
+
+- name: Set tcp flags
+ ansible.builtin.iptables:
+ chain: OUTPUT
+ jump: DROP
+ protocol: tcp
+ tcp_flags:
+ flags: ALL
+ flags_set:
+ - ACK
+ - RST
+ - SYN
+ - FIN
+
+- name: Iptables flush filter
+ ansible.builtin.iptables:
+ chain: "{{ item }}"
+ flush: yes
+ with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
+
+- name: Iptables flush nat
+ ansible.builtin.iptables:
+ table: nat
+ chain: '{{ item }}'
+ flush: yes
+ with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
+
+- name: Log packets arriving into a user-defined chain
+ ansible.builtin.iptables:
+ chain: LOGGING
+ action: append
+ state: present
+ limit: 2/second
+ limit_burst: 20
+ log_prefix: "IPTABLES:INFO: "
+ log_level: info
+
+- name: Allow connections on multiple ports
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_ports:
+ - "80"
+ - "443"
+ - "8081:8083"
+ jump: ACCEPT
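+
+# A hedged addition to the examples: a DNAT rule using the to_destination
+# option documented above; the interface and addresses are illustrative.
+- name: Redirect incoming HTTP traffic to an internal web server
+  ansible.builtin.iptables:
+    table: nat
+    chain: PREROUTING
+    in_interface: eth0
+    protocol: tcp
+    destination_port: 80
+    jump: DNAT
+    to_destination: 10.0.0.10:8080
+  become: yes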
+'''
+
+import re
+
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
+
+IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
+
+BINS = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+ICMP_TYPE_OPTIONS = dict(
+ ipv4='--icmp-type',
+ ipv6='--icmpv6-type',
+)
+
+
+def append_param(rule, param, flag, is_list):
+ if is_list:
+ for item in param:
+ append_param(rule, item, flag, False)
+ else:
+ if param is not None:
+ if param[0] == '!':
+ rule.extend(['!', flag, param[1:]])
+ else:
+ rule.extend([flag, param])
+
+
+def append_tcp_flags(rule, param, flag):
+ if param:
+ if 'flags' in param and 'flags_set' in param:
+ rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
+
+
+def append_match_flag(rule, param, flag, negatable):
+ if param == 'match':
+ rule.extend([flag])
+ elif negatable and param == 'negate':
+ rule.extend(['!', flag])
+
+
+def append_csv(rule, param, flag):
+ if param:
+ rule.extend([flag, ','.join(param)])
+
+
+def append_match(rule, param, match):
+ if param:
+ rule.extend(['-m', match])
+
+
+def append_jump(rule, param, jump):
+ if param:
+ rule.extend(['-j', jump])
+
+
+def append_wait(rule, param, flag):
+ if param:
+ rule.extend([flag, param])
+
+
+def construct_rule(params):
+ rule = []
+ append_wait(rule, params['wait'], '-w')
+ append_param(rule, params['protocol'], '-p', False)
+ append_param(rule, params['source'], '-s', False)
+ append_param(rule, params['destination'], '-d', False)
+ append_param(rule, params['match'], '-m', True)
+ append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
+ append_param(rule, params['jump'], '-j', False)
+ if params.get('jump') and params['jump'].lower() == 'tee':
+ append_param(rule, params['gateway'], '--gateway', False)
+ append_param(rule, params['log_prefix'], '--log-prefix', False)
+ append_param(rule, params['log_level'], '--log-level', False)
+ append_param(rule, params['to_destination'], '--to-destination', False)
+ append_match(rule, params['destination_ports'], 'multiport')
+ append_csv(rule, params['destination_ports'], '--dports')
+ append_param(rule, params['to_source'], '--to-source', False)
+ append_param(rule, params['goto'], '-g', False)
+ append_param(rule, params['in_interface'], '-i', False)
+ append_param(rule, params['out_interface'], '-o', False)
+ append_param(rule, params['fragment'], '-f', False)
+ append_param(rule, params['set_counters'], '-c', False)
+ append_param(rule, params['source_port'], '--source-port', False)
+ append_param(rule, params['destination_port'], '--destination-port', False)
+ append_param(rule, params['to_ports'], '--to-ports', False)
+ append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
+ append_param(
+ rule,
+ params['set_dscp_mark_class'],
+ '--set-dscp-class',
+ False)
+ append_match_flag(rule, params['syn'], '--syn', True)
+ if 'conntrack' in params['match']:
+ append_csv(rule, params['ctstate'], '--ctstate')
+ elif 'state' in params['match']:
+ append_csv(rule, params['ctstate'], '--state')
+ elif params['ctstate']:
+ append_match(rule, params['ctstate'], 'conntrack')
+ append_csv(rule, params['ctstate'], '--ctstate')
+ if 'iprange' in params['match']:
+ append_param(rule, params['src_range'], '--src-range', False)
+ append_param(rule, params['dst_range'], '--dst-range', False)
+ elif params['src_range'] or params['dst_range']:
+ append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
+ append_param(rule, params['src_range'], '--src-range', False)
+ append_param(rule, params['dst_range'], '--dst-range', False)
+ if 'set' in params['match']:
+ append_param(rule, params['match_set'], '--match-set', False)
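+        # Passing 'match' as the param makes append_match_flag emit the bare
+        # match_set_flags value (for example 'src') immediately after
+        # '--match-set <name>', matching the iptables set extension syntax.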
+ append_match_flag(rule, 'match', params['match_set_flags'], False)
+ elif params['match_set']:
+ append_match(rule, params['match_set'], 'set')
+ append_param(rule, params['match_set'], '--match-set', False)
+ append_match_flag(rule, 'match', params['match_set_flags'], False)
+ append_match(rule, params['limit'] or params['limit_burst'], 'limit')
+ append_param(rule, params['limit'], '--limit', False)
+ append_param(rule, params['limit_burst'], '--limit-burst', False)
+ append_match(rule, params['uid_owner'], 'owner')
+ append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
+ append_param(rule, params['uid_owner'], '--uid-owner', False)
+ append_match(rule, params['gid_owner'], 'owner')
+ append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
+ append_param(rule, params['gid_owner'], '--gid-owner', False)
+ if params['jump'] is None:
+ append_jump(rule, params['reject_with'], 'REJECT')
+ append_param(rule, params['reject_with'], '--reject-with', False)
+ append_param(
+ rule,
+ params['icmp_type'],
+ ICMP_TYPE_OPTIONS[params['ip_version']],
+ False)
+ append_match(rule, params['comment'], 'comment')
+ append_param(rule, params['comment'], '--comment', False)
+ return rule
+
+
+def push_arguments(iptables_path, action, params, make_rule=True):
+ cmd = [iptables_path]
+ cmd.extend(['-t', params['table']])
+ cmd.extend([action, params['chain']])
+ if action == '-I' and params['rule_num']:
+ cmd.extend([params['rule_num']])
+ if make_rule:
+ cmd.extend(construct_rule(params))
+ return cmd
+
+
+def check_rule_present(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-C', params)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
+
+def append_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-A', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def insert_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-I', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def remove_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-D', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def flush_table(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def set_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
+ cmd.append(params['policy'])
+ module.run_command(cmd, check_rc=True)
+
+
+def get_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
+ rc, out, _ = module.run_command(cmd, check_rc=True)
+ chain_header = out.split("\n")[0]
+ result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
+ if result:
+ return result.group(1)
+ return None
+
+
+def get_iptables_version(iptables_path, module):
+ cmd = [iptables_path, '--version']
+ rc, out, _ = module.run_command(cmd, check_rc=True)
+ return out.split('v')[1].rstrip('\n')
+
+
+def create_chain(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-N', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def check_chain_present(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
+
+def delete_chain(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-X', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ action=dict(type='str', default='append', choices=['append', 'insert']),
+ ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
+ chain=dict(type='str'),
+ rule_num=dict(type='str'),
+ protocol=dict(type='str'),
+ wait=dict(type='str'),
+ source=dict(type='str'),
+ to_source=dict(type='str'),
+ destination=dict(type='str'),
+ to_destination=dict(type='str'),
+ match=dict(type='list', elements='str', default=[]),
+ tcp_flags=dict(type='dict',
+ options=dict(
+ flags=dict(type='list', elements='str'),
+ flags_set=dict(type='list', elements='str'))
+ ),
+ jump=dict(type='str'),
+ gateway=dict(type='str'),
+ log_prefix=dict(type='str'),
+ log_level=dict(type='str',
+ choices=['0', '1', '2', '3', '4', '5', '6', '7',
+ 'emerg', 'alert', 'crit', 'error',
+ 'warning', 'notice', 'info', 'debug'],
+ default=None,
+ ),
+ goto=dict(type='str'),
+ in_interface=dict(type='str'),
+ out_interface=dict(type='str'),
+ fragment=dict(type='str'),
+ set_counters=dict(type='str'),
+ source_port=dict(type='str'),
+ destination_port=dict(type='str'),
+ destination_ports=dict(type='list', elements='str', default=[]),
+ to_ports=dict(type='str'),
+ set_dscp_mark=dict(type='str'),
+ set_dscp_mark_class=dict(type='str'),
+ comment=dict(type='str'),
+ ctstate=dict(type='list', elements='str', default=[]),
+ src_range=dict(type='str'),
+ dst_range=dict(type='str'),
+ match_set=dict(type='str'),
+ match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']),
+ limit=dict(type='str'),
+ limit_burst=dict(type='str'),
+ uid_owner=dict(type='str'),
+ gid_owner=dict(type='str'),
+ reject_with=dict(type='str'),
+ icmp_type=dict(type='str'),
+ syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
+ flush=dict(type='bool', default=False),
+ policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
+ chain_management=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=(
+ ['set_dscp_mark', 'set_dscp_mark_class'],
+ ['flush', 'policy'],
+ ),
+ required_if=[
+ ['jump', 'TEE', ['gateway']],
+ ['jump', 'tee', ['gateway']],
+ ]
+ )
+ args = dict(
+ changed=False,
+ failed=False,
+ ip_version=module.params['ip_version'],
+ table=module.params['table'],
+ chain=module.params['chain'],
+ flush=module.params['flush'],
+ rule=' '.join(construct_rule(module.params)),
+ state=module.params['state'],
+ chain_management=module.params['chain_management'],
+ )
+
+ ip_version = module.params['ip_version']
+ iptables_path = module.get_bin_path(BINS[ip_version], True)
+
+ # Check if chain option is required
+ if args['flush'] is False and args['chain'] is None:
+ module.fail_json(msg="Either chain or flush parameter must be specified.")
+
+ if module.params.get('log_prefix', None) or module.params.get('log_level', None):
+ if module.params['jump'] is None:
+ module.params['jump'] = 'LOG'
+ elif module.params['jump'] != 'LOG':
+ module.fail_json(msg="Logging options can only be used with the LOG jump target.")
+
+ # Check if wait option is supported
+ iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
+
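+    # Between 1.4.20 and 1.6.0 the -w flag takes no seconds argument, so any
+    # user-supplied wait value is cleared; below 1.4.20 -w is unsupported and
+    # the parameter is unset entirely.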
+ if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
+ if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
+ module.params['wait'] = ''
+ else:
+ module.params['wait'] = None
+
+ # Flush the table
+ if args['flush'] is True:
+ args['changed'] = True
+ if not module.check_mode:
+ flush_table(iptables_path, module, module.params)
+
+ # Set the policy
+ elif module.params['policy']:
+ current_policy = get_chain_policy(iptables_path, module, module.params)
+ if not current_policy:
+ module.fail_json(msg='Can\'t detect current policy')
+
+ changed = current_policy != module.params['policy']
+ args['changed'] = changed
+ if changed and not module.check_mode:
+ set_chain_policy(iptables_path, module, module.params)
+
+ # Delete the chain if there is no rule in the arguments
+ elif (args['state'] == 'absent') and not args['rule']:
+ chain_is_present = check_chain_present(
+ iptables_path, module, module.params
+ )
+ args['changed'] = chain_is_present
+
+ if (chain_is_present and args['chain_management'] and not module.check_mode):
+ delete_chain(iptables_path, module, module.params)
+
+ else:
+ insert = (module.params['action'] == 'insert')
+ rule_is_present = check_rule_present(
+ iptables_path, module, module.params
+ )
+ chain_is_present = rule_is_present or check_chain_present(
+ iptables_path, module, module.params
+ )
+ should_be_present = (args['state'] == 'present')
+
+ # Check if target is up to date
+ args['changed'] = (rule_is_present != should_be_present)
+ if args['changed'] is False:
+ # Target is already up to date
+ module.exit_json(**args)
+
+ # Check only; don't modify
+ if not module.check_mode:
+ if should_be_present:
+ if not chain_is_present and args['chain_management']:
+ create_chain(iptables_path, module, module.params)
+
+ if insert:
+ insert_rule(iptables_path, module, module.params)
+ else:
+ append_rule(iptables_path, module, module.params)
+ else:
+ remove_rule(iptables_path, module, module.params)
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
new file mode 100644
index 0000000..b0c8888
--- /dev/null
+++ b/lib/ansible/modules/known_hosts.py
@@ -0,0 +1,365 @@
+
+# Copyright: (c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: known_hosts
+short_description: Add or remove a host from the C(known_hosts) file
+description:
+  - The C(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
+ - Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
+ This is useful if you're going to want to use the M(ansible.builtin.git) module over ssh, for example.
+ - If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful.
+version_added: "1.9"
+options:
+ name:
+ aliases: [ 'host' ]
+ description:
+ - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
+ - Must match the <hostname> or <ip> present in the C(key) attribute.
+ - For a custom SSH port, C(name) needs to specify the port as well. See the example section.
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public host key, as a string.
+ - Required if C(state=present), optional when C(state=absent), in which case all keys for the host are removed.
+ - The key must be in the right format for SSH (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT").
+ - Specifically, the key should not match the format that is found in an SSH pubkey file, but should rather have the hostname prepended to a
+ line that includes the pubkey, the same way that it would appear in the known_hosts file. The value prepended to the line must also match
+ the value of the name parameter.
+ - Should be of format C(<hostname[,IP]> ssh-rsa <pubkey>).
+ - For a custom SSH port, C(key) needs to specify the port as well. See the example section.
+ type: str
+ path:
+ description:
+ - The known_hosts file to edit.
+ - The known_hosts file will be created if needed. The rest of the path must exist prior to running the module.
+ default: "~/.ssh/known_hosts"
+ type: path
+ hash_host:
+ description:
+ - Hash the hostname in the known_hosts file.
+ type: bool
+ default: "no"
+ version_added: "2.3"
+ state:
+ description:
+ - I(present) to add the host key.
+ - I(absent) to remove it.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+extends_documentation_fragment:
+ - action_common_attributes
+author:
+- Matthew Vernon (@mcv21)
+'''
+
+EXAMPLES = r'''
+- name: Tell the host about our servers it might want to ssh to
+ ansible.builtin.known_hosts:
+ path: /etc/ssh/ssh_known_hosts
+ name: foo.com.invalid
+ key: "{{ lookup('ansible.builtin.file', 'pubkeys/foo.com.invalid') }}"
+
+- name: Another way to call known_hosts
+ ansible.builtin.known_hosts:
+ name: host1.example.com # or 10.9.8.77
+ key: host1.example.com,10.9.8.77 ssh-rsa ASDeararAIUHI324324 # some key gibberish
+ path: /etc/ssh/ssh_known_hosts
+ state: present
+
+- name: Add host with custom SSH port
+ ansible.builtin.known_hosts:
+ name: '[host1.example.com]:2222'
+ key: '[host1.example.com]:2222 ssh-rsa ASDeararAIUHI324324' # some key gibberish
+ path: /etc/ssh/ssh_known_hosts
+ state: present
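+
+# With state=absent and no key given, all keys for the host are removed
+# (the hostname below is illustrative)
+- name: Remove all host keys for a decommissioned server
+ ansible.builtin.known_hosts:
+ name: old.example.com
+ path: /etc/ssh/ssh_known_hosts
+ state: absent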
+'''
+
+# Makes sure public host keys are present or absent in the given known_hosts
+# file.
+#
+# Arguments
+# =========
+# name = hostname whose key should be added (alias: host)
+# key = line(s) to add to known_hosts file
+# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
+# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
+# state = absent|present (default: present)
+
+import base64
+import errno
+import hashlib
+import hmac
+import os
+import os.path
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def enforce_state(module, params):
+ """
+ Add or remove key.
+ """
+
+ host = params["name"].lower()
+ key = params.get("key", None)
+ path = params.get("path")
+ hash_host = params.get("hash_host")
+ state = params.get("state")
+ # Find the ssh-keygen binary
+ sshkeygen = module.get_bin_path("ssh-keygen", True)
+
+ if not key and state != "absent":
+ module.fail_json(msg="No key specified when adding a host")
+
+ if key and hash_host:
+ key = hash_host_key(host, key)
+
+ # Trailing newline in files gets lost, so re-add if necessary
+ if key and not key.endswith('\n'):
+ key += '\n'
+
+ sanity_check(module, host, key, sshkeygen)
+
+ found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
+
+ params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
+
+ # check if we are trying to remove a non matching key,
+ # in that case return with no change to the host
+ if state == 'absent' and not found_line and key:
+ params['changed'] = False
+ return params
+
+ # We will change state if found==True & state!="present"
+ # or found==False & state=="present"
+ # i.e. found XOR (state=="present")
+ # Alternatively, if replace is true (i.e. key present, and we must change
+ # it)
+ if module.check_mode:
+ module.exit_json(changed=replace_or_add or (state == "present") != found,
+ diff=params['diff'])
+
+ # Now do the work.
+
+ # Only remove whole host if found and no key provided
+ if found and not key and state == "absent":
+ module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
+ params['changed'] = True
+
+ # Next, add a new (or replacing) entry
+ if replace_or_add or found != (state == "present"):
+ try:
+ inf = open(path, "r")
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ inf = None
+ else:
+ module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
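+ # Rewrite the file to a temporary file in the same directory, dropping the
+ # matched line when replacing or removing, then atomically move it into place.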
+ try:
+ with tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path), delete=False) as outf:
+ if inf is not None:
+ for line_number, line in enumerate(inf):
+ if found_line == (line_number + 1) and (replace_or_add or state == 'absent'):
+ continue # skip this line to replace its key
+ outf.write(line)
+ inf.close()
+ if state == 'present':
+ outf.write(key)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Failed to write to file %s: %s" % (path, to_native(e)))
+ else:
+ module.atomic_move(outf.name, path)
+
+ params['changed'] = True
+
+ return params
+
+
+def sanity_check(module, host, key, sshkeygen):
+ '''Check supplied key is sensible
+
+ host and key are parameters provided by the user; If the host
+ provided is inconsistent with the key supplied, then this function
+ quits, providing an error to the user.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ # If no key supplied, we're doing a removal, and have nothing to check here.
+ if not key:
+ return
+ # Rather than parsing the key ourselves, get ssh-keygen to do it
+ # (this is essential for hashed keys, but otherwise useful, as the
+ # key question is whether ssh-keygen thinks the key matches the host).
+
+ # The approach is to write the key to a temporary file,
+ # and then attempt to look up the specified host in that file.
+
+ if re.search(r'\S+(\s+)?,(\s+)?', host):
+ module.fail_json(msg="Comma separated list of names is not supported. "
+ "Please pass a single name to lookup in the known_hosts file.")
+
+ with tempfile.NamedTemporaryFile(mode='w+') as outf:
+ try:
+ outf.write(key)
+ outf.flush()
+ except IOError as e:
+ module.fail_json(msg="Failed to write to temporary file %s: %s" %
+ (outf.name, to_native(e)))
+
+ sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
+ rc, stdout, stderr = module.run_command(sshkeygen_command)
+
+ if stdout == '': # host not found
+ module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
+
+
+def search_for_host_key(module, host, key, path, sshkeygen):
+ '''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
+
+ Looks up host and keytype in the known_hosts file path; if it's there, looks to see
+ if one of those entries matches key. Returns:
+ found (Boolean): is host found in path?
+ replace_or_add (Boolean): is the key in path different from the one supplied by the user?
+ found_line (int or None): the line where a key of the same type was found
+ if found=False, then replace is always False.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ if not os.path.exists(path):
+ return False, False, None
+
+ sshkeygen_command = [sshkeygen, '-F', host, '-f', path]
+
+ # openssh >=6.4 has changed ssh-keygen behaviour such that it returns
+ # 1 if no host is found, whereas previously it returned 0
+ rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
+ if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
+ return False, False, None # host not found, no other errors
+ if rc != 0: # something went wrong
+ module.fail_json(msg="ssh-keygen failed (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
+
+ # If user supplied no key, we don't want to try and replace anything with it
+ if not key:
+ return True, False, None
+
+ lines = stdout.split('\n')
+ new_key = normalize_known_hosts_key(key)
+
+ for lnum, l in enumerate(lines):
+ if l == '':
+ continue
+ elif l[0] == '#': # info output from ssh-keygen; contains the line number where key was found
+ try:
+ # This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
+ # It always outputs the non-localized comment before the found key
+ found_line = int(re.search(r'found: line (\d+)', l).group(1))
+ except (IndexError, AttributeError):
+ module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
+ else:
+ found_key = normalize_known_hosts_key(l)
+ if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed
+ new_key['host'] = found_key['host']
+ if new_key == found_key: # found a match
+ return True, False, found_line # found exactly the same key, don't replace
+ elif new_key['type'] == found_key['type']: # found a different key for the same key type
+ return True, True, found_line
+
+ # No match found, return found and replace, but no line
+ return True, True, None
+
+
+def hash_host_key(host, key):
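+ # OpenSSH hashes known_hosts names as '|1|<b64 salt>|<b64 HMAC-SHA1(salt, host)>',
+ # using a fresh 20-byte random salt as the HMAC key for each entry.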
+ hmac_key = os.urandom(20)
+ hashed_host = hmac.new(hmac_key, to_bytes(host), hashlib.sha1).digest()
+ parts = key.strip().split()
+ # @ indicates the optional marker field used for @cert-authority or @revoked
+ i = 1 if parts[0][0] == '@' else 0
+ parts[i] = '|1|%s|%s' % (to_native(base64.b64encode(hmac_key)), to_native(base64.b64encode(hashed_host)))
+ return ' '.join(parts)
+
+
+def normalize_known_hosts_key(key):
+ '''
+ Transform a key, either taken from a known_host file or provided by the
+ user, into a normalized form.
+ The host part (which might include multiple hostnames or be hashed) gets
+ replaced by the provided host. Also, any spurious information gets removed
+ from the end (like the username@host tag usually present in hostkeys, but
+ absent in known_hosts files)
+ '''
+ key = key.strip() # trim trailing newline
+ k = key.split()
+ d = dict()
+ # The optional "marker" field, used for @cert-authority or @revoked
+ if k[0][0] == '@':
+ d['options'] = k[0]
+ d['host'] = k[1]
+ d['type'] = k[2]
+ d['key'] = k[3]
+ else:
+ d['host'] = k[0]
+ d['type'] = k[1]
+ d['key'] = k[2]
+ return d
+
+
+def compute_diff(path, found_line, replace_or_add, state, key):
+ diff = {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': '',
+ 'after': '',
+ }
+ try:
+ inf = open(path, "r")
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ diff['before_header'] = '/dev/null'
+ else:
+ diff['before'] = inf.read()
+ inf.close()
+ lines = diff['before'].splitlines(True)
+ if (replace_or_add or state == 'absent') and found_line is not None and 1 <= found_line <= len(lines):
+ del lines[found_line - 1]
+ if state == 'present' and (replace_or_add or found_line is None):
+ lines.append(key)
+ diff['after'] = ''.join(lines)
+ return diff
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['host']),
+ key=dict(required=False, type='str', no_log=False),
+ path=dict(default="~/.ssh/known_hosts", type='path'),
+ hash_host=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ results = enforce_state(module, module.params)
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py
new file mode 100644
index 0000000..0e1b76f
--- /dev/null
+++ b/lib/ansible/modules/lineinfile.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lineinfile
+short_description: Manage lines in text files
+description:
+ - This module ensures a particular line is in a file, or replaces an
+ existing line using a back-referenced regular expression.
+ - This is primarily useful when you want to change just a single line in a file.
+ - See the M(ansible.builtin.replace) module if you want to change multiple, similar lines
+ or check M(ansible.builtin.blockinfile) if you want to insert/update/remove a block of lines in a file.
+ For other cases, see the M(ansible.builtin.copy) or M(ansible.builtin.template) modules.
+version_added: "0.7"
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: true
+ aliases: [ dest, destfile, name ]
+ regexp:
+ description:
+ - The regular expression to look for in every line of the file.
+ - For C(state=present), the pattern to replace if found. Only the last line found will be replaced.
+ - For C(state=absent), the pattern of the line(s) to remove.
+ - If the regular expression is not matched, the line will be
+ added to the file in keeping with C(insertbefore) or C(insertafter)
+ settings.
+ - When modifying a line the regexp should typically match both the initial state of
+ the line as well as its state after replacement by C(line) to ensure idempotence.
+ - Uses Python regular expressions. See U(https://docs.python.org/3/library/re.html).
+ type: str
+ aliases: [ regex ]
+ version_added: '1.7'
+ search_string:
+ description:
+ - The literal string to look for in every line of the file. This does not have to match the entire line.
+ - For C(state=present), the line to replace if the string is found in the file. Only the last line found will be replaced.
+ - For C(state=absent), the line(s) to remove if the string is in the line.
+ - If the literal expression is not matched, the line will be
+ added to the file in keeping with C(insertbefore) or C(insertafter)
+ settings.
+ - Mutually exclusive with C(backrefs) and C(regexp).
+ type: str
+ version_added: '2.11'
+ state:
+ description:
+ - Whether the line should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ line:
+ description:
+ - The line to insert/replace into the file.
+ - Required for C(state=present).
+ - If C(backrefs) is set, may contain backreferences that will get
+ expanded with the C(regexp) capture groups if the regexp matches.
+ type: str
+ aliases: [ value ]
+ backrefs:
+ description:
+ - Used with C(state=present).
+ - If set, C(line) can contain backreferences (both positional and named)
+ that will get populated if the C(regexp) matches.
+ - This parameter changes the operation of the module slightly;
+ C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
+ does not match anywhere in the file, the file will be left unchanged.
+ - If the C(regexp) does match, the last matching line will be replaced by
+ the expanded line parameter.
+ - Mutually exclusive with C(search_string).
+ type: bool
+ default: no
+ version_added: "1.1"
+ insertafter:
+ description:
+ - Used with C(state=present).
+ - If specified, the line will be inserted after the last match of the specified regular expression.
+ - If the first match is required, use C(firstmatch=yes).
+ - A special value is available; C(EOF) for inserting the line at the end of the file.
+ - If the specified regular expression has no matches, C(EOF) will be used instead.
+ - If C(insertbefore) is set, the default value C(EOF) will be ignored.
+ - If regular expressions are passed to both C(regexp) and C(insertafter), C(insertafter) is only honored if no match for C(regexp) is found.
+ - May not be used with C(backrefs) or C(insertbefore).
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+ - Used with C(state=present).
+ - If specified, the line will be inserted before the last match of the specified regular expression.
+ - If the first match is required, use C(firstmatch=yes).
+ - A special value is available; C(BOF) for inserting the line at the beginning of the file.
+ - If the specified regular expression has no matches, the line will be inserted at the end of the file.
+ - If regular expressions are passed to both C(regexp) and C(insertbefore), C(insertbefore) is only honored if no match for C(regexp) is found.
+ - May not be used with C(backrefs) or C(insertafter).
+ type: str
+ choices: [ BOF, '*regex*' ]
+ version_added: "1.1"
+ create:
+ description:
+ - Used with C(state=present).
+ - If specified, the file will be created if it does not already exist.
+ - By default it will fail if the file is missing.
+ type: bool
+ default: no
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ firstmatch:
+ description:
+ - Used with C(insertafter) or C(insertbefore).
+ - If set, C(insertafter) and C(insertbefore) will work with the first line that matches the given regular expression.
+ type: bool
+ default: no
+ version_added: "2.5"
+ others:
+ description:
+ - All arguments accepted by the M(ansible.builtin.file) module also work here.
+ type: str
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - files
+ - validate
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: none
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+seealso:
+- module: ansible.builtin.blockinfile
+- module: ansible.builtin.copy
+- module: ansible.builtin.file
+- module: ansible.builtin.replace
+- module: ansible.builtin.template
+- module: community.windows.win_lineinfile
+author:
+ - Daniel Hokka Zakrisson (@dhozac)
+ - Ahti Kitsik (@ahtik)
+ - Jose Angel Munoz (@imjoseangel)
+'''
+
+EXAMPLES = r'''
+# NOTE: Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
+- name: Ensure SELinux is set to enforcing mode
+ ansible.builtin.lineinfile:
+ path: /etc/selinux/config
+ regexp: '^SELINUX='
+ line: SELINUX=enforcing
+
+- name: Make sure group wheel is not in the sudoers configuration
+ ansible.builtin.lineinfile:
+ path: /etc/sudoers
+ state: absent
+ regexp: '^%wheel'
+
+- name: Replace a localhost entry with our own
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ regexp: '^127\.0\.0\.1'
+ line: 127.0.0.1 localhost
+ owner: root
+ group: root
+ mode: '0644'
+
+- name: Replace a localhost entry searching for a literal string to avoid escaping
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ search_string: '127.0.0.1'
+ line: 127.0.0.1 localhost
+ owner: root
+ group: root
+ mode: '0644'
+
+- name: Ensure the default Apache port is 8080
+ ansible.builtin.lineinfile:
+ path: /etc/httpd/conf/httpd.conf
+ regexp: '^Listen '
+ insertafter: '^#Listen '
+ line: Listen 8080
+
+- name: Ensure php extension matches new pattern
+ ansible.builtin.lineinfile:
+ path: /etc/httpd/conf/httpd.conf
+ search_string: '<FilesMatch ".php[45]?$">'
+ insertafter: '^\t<Location \/>\n'
+ line: ' <FilesMatch ".php[34]?$">'
+
+- name: Ensure we have our own comment added to /etc/services
+ ansible.builtin.lineinfile:
+ path: /etc/services
+ regexp: '^# port for http'
+ insertbefore: '^www.*80/tcp'
+ line: '# port for http by default'
+
+- name: Add a line to a file if the file does not exist, without passing regexp
+ ansible.builtin.lineinfile:
+ path: /tmp/testfile
+ line: 192.168.1.99 foo.lab.net foo
+ create: yes
+
+# NOTE: Yaml requires escaping backslashes in double quotes but not in single quotes
+- name: Ensure the JBoss memory settings are exactly as needed
+ ansible.builtin.lineinfile:
+ path: /opt/jboss-as/bin/standalone.conf
+ regexp: '^(.*)Xms(\d+)m(.*)$'
+ line: '\1Xms${xms}m\3'
+ backrefs: yes
+
+# NOTE: Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
+- name: Validate the sudoers file before saving
+ ansible.builtin.lineinfile:
+ path: /etc/sudoers
+ state: present
+ regexp: '^%ADMIN ALL='
+ line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
+ validate: /usr/sbin/visudo -cf %s
+
+# See https://docs.python.org/3/library/re.html for further details on syntax
+- name: Use backrefs with alternative group syntax to avoid conflicts with variable values
+ ansible.builtin.lineinfile:
+ path: /tmp/config
+ regexp: ^(host=).*
+ line: \g<1>{{ hostname }}
+ backrefs: yes
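+
+# Illustrative use of firstmatch: insert relative to the first match of
+# insertafter rather than the last (the file path and line are made up)
+- name: Insert a setting after the first [defaults] section header only
+ ansible.builtin.lineinfile:
+ path: /etc/example.ini
+ insertafter: '^\[defaults\]'
+ firstmatch: yes
+ line: forks = 20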
+'''
+
+RETURN = r'''#'''
+
+import os
+import re
+import tempfile
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def write_changes(module, b_lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ with os.fdopen(tmpfd, 'wb') as f:
+ f.writelines(b_lines)
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile,
+ to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
+ unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message, diff):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def present(module, dest, regexp, search_string, line, insertafter, insertbefore, create,
+ backup, backrefs, firstmatch):
+
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % dest,
+ 'after_header': '%s (content)' % dest}
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
+ b_destpath = os.path.dirname(b_dest)
+ if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
+ try:
+ os.makedirs(b_destpath)
+ except Exception as e:
+ module.fail_json(msg='Error creating %s (%s)' % (to_text(b_destpath), to_text(e)))
+
+ b_lines = []
+ else:
+ with open(b_dest, 'rb') as f:
+ b_lines = f.readlines()
+
+ if module._diff:
+ diff['before'] = to_native(b''.join(b_lines))
+
+ if regexp is not None:
+ bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
+
+ if insertafter not in (None, 'BOF', 'EOF'):
+ bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
+ elif insertbefore not in (None, 'BOF'):
+ bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
+ else:
+ bre_ins = None
+
+ # index[0] is the line num where regexp has been found
+ # index[1] is the line num where insertafter/insertbefore has been found
+ index = [-1, -1]
+ match = None
+ exact_line_match = False
+ b_line = to_bytes(line, errors='surrogate_or_strict')
+
+ # The module's doc says
+ # "If regular expressions are passed to both regexp and
+ # insertafter, insertafter is only honored if no match for regexp is found."
+ # Therefore:
+ # 1. regexp or search_string was found -> ignore insertafter, replace the matched line
+ # 2. regexp or search_string was not found -> insert the line after 'insertafter' or 'insertbefore' line
+
+ # Given the above:
+ # 1. First check that there is no match for regexp:
+ if regexp is not None:
+ for lineno, b_cur_line in enumerate(b_lines):
+ match_found = bre_m.search(b_cur_line)
+ if match_found:
+ index[0] = lineno
+ match = match_found
+ if firstmatch:
+ break
+
+ # 2. Second check that there is no match for search_string:
+ if search_string is not None:
+ for lineno, b_cur_line in enumerate(b_lines):
+ match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
+ if match_found:
+ index[0] = lineno
+ match = match_found
+ if firstmatch:
+ break
+
+ # 3. When no match found on the previous step,
+ # parse for searching insertafter/insertbefore:
+ if not match:
+ for lineno, b_cur_line in enumerate(b_lines):
+ if b_line == b_cur_line.rstrip(b'\r\n'):
+ index[0] = lineno
+ exact_line_match = True
+
+ elif bre_ins is not None and bre_ins.search(b_cur_line):
+ if insertafter:
+ # + 1 for the next line
+ index[1] = lineno + 1
+ if firstmatch:
+ break
+
+ if insertbefore:
+ # index[1] for the previous line
+ index[1] = lineno
+ if firstmatch:
+ break
+
+ msg = ''
+ changed = False
+ b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
+ # Exact line or Regexp matched a line in the file
+ if index[0] != -1:
+ if backrefs and match:
+ b_new_line = match.expand(b_line)
+ else:
+ # Don't do backref expansion if not asked.
+ b_new_line = b_line
+
+ if not b_new_line.endswith(b_linesep):
+ b_new_line += b_linesep
+
+ # If no regexp or search_string was given and no line match is found anywhere in the file,
+ # insert the line appropriately if using insertbefore or insertafter
+ if (regexp, search_string, match) == (None, None, None) and not exact_line_match:
+
+ # Insert lines
+ if insertafter and insertafter != 'EOF':
+ # Ensure there is a line separator after the found string
+ # at the end of the file.
+ if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'):
+ b_lines[-1] = b_lines[-1] + b_linesep
+
+ # If the line to insert after is at the end of the file
+ # use the appropriate index value.
+ if len(b_lines) == index[1]:
+ if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ elif b_lines[index[1]].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif insertbefore and insertbefore != 'BOF':
+ # If the line to insert before is at the beginning of the file
+ # use the appropriate index value.
+ if index[1] <= 0:
+ if b_lines[index[1]].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif b_lines[index[0]] != b_new_line:
+ b_lines[index[0]] = b_new_line
+ msg = 'line replaced'
+ changed = True
+
+ elif backrefs:
+ # Do absolutely nothing, since it's not safe generating the line
+ # without the regexp matching to populate the backrefs.
+ pass
+ # Add it to the beginning of the file
+ elif insertbefore == 'BOF' or insertafter == 'BOF':
+ b_lines.insert(0, b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ # Add it to the end of the file if requested or
+ # if insertafter/insertbefore didn't match anything
+ # (so default behaviour is to add at the end)
+ elif insertafter == 'EOF' or index[1] == -1:
+
+ # If the file is not empty then ensure there's a newline before the added line
+ if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'):
+ b_lines.append(b_linesep)
+
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif insertafter and index[1] != -1:
+
+ # Don't insert the line if it already matches at the index.
+ # If the line to insert after is at the end of the file use the appropriate index value.
+ if len(b_lines) == index[1]:
+ if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ elif b_line != b_lines[index[1]].rstrip(b'\n\r'):
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ # insert matched, but not the regexp or search_string
+ else:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ if module._diff:
+ diff['after'] = to_native(b''.join(b_lines))
+
+ backupdest = ""
+ if changed and not module.check_mode:
+ if backup and os.path.exists(b_dest):
+ backupdest = module.backup_local(dest)
+ write_changes(module, b_lines, dest)
+
+ if module.check_mode and not os.path.exists(b_dest):
+ module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % dest
+ attr_diff['after_header'] = '%s (file attributes)' % dest
+
+ difflist = [diff, attr_diff]
+ module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
+
+
+def absent(module, dest, regexp, search_string, line, backup):
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ module.exit_json(changed=False, msg="file not present")
+
+ msg = ''
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % dest,
+ 'after_header': '%s (content)' % dest}
+
+ with open(b_dest, 'rb') as f:
+ b_lines = f.readlines()
+
+ if module._diff:
+ diff['before'] = to_native(b''.join(b_lines))
+
+ if regexp is not None:
+ bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
+ found = []
+
+ b_line = to_bytes(line, errors='surrogate_or_strict')
+
+ def matcher(b_cur_line):
+ if regexp is not None:
+ match_found = bre_c.search(b_cur_line)
+ elif search_string is not None:
+ match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
+ else:
+ match_found = b_line == b_cur_line.rstrip(b'\r\n')
+ if match_found:
+ found.append(b_cur_line)
+ return not match_found
+
+ b_lines = [l for l in b_lines if matcher(l)]
+ changed = len(found) > 0
+
+ if module._diff:
+ diff['after'] = to_native(b''.join(b_lines))
+
+ backupdest = ""
+ if changed and not module.check_mode:
+ if backup:
+ backupdest = module.backup_local(dest)
+ write_changes(module, b_lines, dest)
+
+ if changed:
+ msg = "%s line(s) removed" % len(found)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % dest
+ attr_diff['after_header'] = '%s (file attributes)' % dest
+
+ difflist = [diff, attr_diff]
+
+ module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ regexp=dict(type='str', aliases=['regex']),
+ search_string=dict(type='str'),
+ line=dict(type='str', aliases=['value']),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ backrefs=dict(type='bool', default=False),
+ create=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ firstmatch=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['insertbefore', 'insertafter'], ['regexp', 'search_string'], ['backrefs', 'search_string']],
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ create = params['create']
+ backup = params['backup']
+ backrefs = params['backrefs']
+ path = params['path']
+ firstmatch = params['firstmatch']
+ regexp = params['regexp']
+ search_string = params['search_string']
+ line = params['line']
+
+ if '' in [regexp, search_string]:
+ msg = ("The %s is an empty string, which will match every line in the file. "
+ "This may have unintended consequences, such as replacing the last line in the file rather than appending.")
+ param_name = 'search string'
+ if regexp == '':
+ param_name = 'regular expression'
+ msg += " If this is desired, use '^' to match every line in the file and avoid this warning."
+ module.warn(msg % param_name)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if os.path.isdir(b_path):
+ module.fail_json(rc=256, msg='Path %s is a directory !' % path)
+
+ if params['state'] == 'present':
+ if backrefs and regexp is None:
+ module.fail_json(msg='regexp is required with backrefs=true')
+
+ if line is None:
+ module.fail_json(msg='line is required with state=present')
+
+ # Deal with the insertafter default value manually, to avoid errors
+ # because of the mutually_exclusive mechanism.
+ ins_bef, ins_aft = params['insertbefore'], params['insertafter']
+ if ins_bef is None and ins_aft is None:
+ ins_aft = 'EOF'
+
+ present(module, path, regexp, search_string, line,
+ ins_aft, ins_bef, create, backup, backrefs, firstmatch)
+ else:
+ if (regexp, search_string, line) == (None, None, None):
+ module.fail_json(msg='one of line, search_string, or regexp is required with state=absent')
+
+ absent(module, path, regexp, search_string, line, backup)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py
new file mode 100644
index 0000000..1b062c9
--- /dev/null
+++ b/lib/ansible/modules/meta.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, a Red Hat company
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: meta
+short_description: Execute Ansible 'actions'
+version_added: '1.2'
+description:
+ - Meta tasks are a special kind of task which can influence Ansible internal execution or state.
+ - Meta tasks can be used anywhere within your playbook.
+ - This module is also supported for Windows targets.
+options:
+ free_form:
+ description:
+ - This module takes a free form command, as a string. There is not an actual option named "free form". See the examples!
+ - C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain
+ points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
+ - C(refresh_inventory) (added in Ansible 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
+ re-executed. If the dynamic inventory script is using a cache, Ansible cannot know this and has no way of refreshing it (you can disable the cache
+ or, if available for your specific inventory datasource (e.g. aws), you can use an inventory plugin instead of an inventory script).
+ This is mainly useful when additional hosts are created and users wish to use them instead of using the M(ansible.builtin.add_host) module.
+ - C(noop) (added in Ansible 2.0) literally does 'nothing'. It is mainly used internally and is not recommended for general use.
+ - C(clear_facts) (added in Ansible 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared,
+ including the fact cache.
+ - C(clear_host_errors) (added in Ansible 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts.
+ - C(end_play) (added in Ansible 2.2) causes the play to end without failing the host(s). Note that this affects all hosts.
+ - C(reset_connection) (added in Ansible 2.3) interrupts a persistent connection (i.e. ssh + control persist).
+ - C(end_host) (added in Ansible 2.8) is a per-host variation of C(end_play). Causes the play to end for the current host without failing it.
+ - C(end_batch) (added in Ansible 2.12) causes the current batch (see C(serial)) to end without failing the host(s).
+ Note that with C(serial=0), or when C(serial) is undefined, this behaves the same as C(end_play).
+ choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch ]
+ required: true
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ support: none
+ bypass_host_loop:
+ details: Some of the subactions ignore the host loop, see the description above for each specific action for the exceptions
+ support: partial
+ bypass_task_loop:
+ details: Most of the subactions ignore the task loop, see the description above for each specific action for the exceptions
+ support: partial
+ check_mode:
+ details: While these actions don't modify the targets directly they do change possible states of the target within the run
+ support: partial
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ ignore_conditional:
+ details: Only some options support conditionals and when they do they act 'bypassing the host loop', taking the values from first available host
+ support: partial
+ connection:
+ details: Most options in this action do not use a connection, except C(reset_connection) which still does not connect to the remote
+ support: partial
+notes:
+ - C(clear_facts) will remove the persistent facts from M(ansible.builtin.set_fact) using C(cacheable=True),
+ but not the current host variable it creates for the current run.
+ - Skipping C(meta) tasks with tags is not supported before Ansible 2.11.
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.fail
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = r'''
+# Example showing flushing handlers on demand, not at end of play
+- ansible.builtin.template:
+ src: new.j2
+ dest: /etc/config.txt
+ notify: myhandler
+
+- name: Force all notified handlers to run at this point, not waiting for normal sync points
+ ansible.builtin.meta: flush_handlers
+
+# Example showing how to refresh inventory during play
+- name: Reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
+ cloud_guest: # this is fake module
+ name: newhost
+ state: present
+
+- name: Refresh inventory to ensure new instances exist in inventory
+ ansible.builtin.meta: refresh_inventory
+
+# Example showing how to clear all existing facts of targeted hosts
+- name: Clear gathered facts from all currently targeted hosts
+ ansible.builtin.meta: clear_facts
+
+# Example showing how to continue using a failed target
+- name: Bring host back to play after failure
+ ansible.builtin.copy:
+ src: file
+ dest: /etc/file
+ remote_user: imightnothavepermission
+
+- ansible.builtin.meta: clear_host_errors
+
+# Example showing how to reset an existing connection
+- ansible.builtin.user:
+ name: '{{ ansible_user }}'
+ groups: input
+
+- name: Reset ssh connection to allow user changes to affect 'current login user'
+ ansible.builtin.meta: reset_connection
+
+# Example showing how to end the play for specific targets
+- name: End the play for hosts that run CentOS 6
+ ansible.builtin.meta: end_host
+ when:
+ - ansible_distribution == 'CentOS'
+ - ansible_distribution_major_version == '6'
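+
+# Illustrative example of ending the current serial batch early; the
+# 'canary_failed' variable is hypothetical
+- name: End the current batch when the canary check failed
+ ansible.builtin.meta: end_batch
+ when: canary_failed | default(false)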
+'''
diff --git a/lib/ansible/modules/package.py b/lib/ansible/modules/package.py
new file mode 100644
index 0000000..6078739
--- /dev/null
+++ b/lib/ansible/modules/package.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: package
+version_added: 2.0
+author:
+ - Ansible Core Team
+short_description: Generic OS package manager
+description:
+ - This module manages packages on a target without specifying a package manager module (like M(ansible.builtin.yum), M(ansible.builtin.apt), ...).
+ It is convenient to use in a heterogeneous environment of machines without having to create a specific task for
+ each package manager. C(package) dispatches to the package manager module for the operating system, as
+ discovered by the M(ansible.builtin.setup) module. If C(setup) has not yet run, C(package) will run it.
+ - This module acts as a proxy to the underlying package manager module. While all arguments will be passed to the
+ underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
+ of module arguments that all packaging modules support.
+ - For Windows targets, use the M(ansible.windows.win_package) module instead.
+options:
+ name:
+ description:
+ - Package name, or package specifier with version.
+ - Syntax varies with package manager. For example C(name-1.0) or C(name=1.0).
+ - Package names also vary with package manager; this module will not "translate" them per distro. For example C(libyaml-dev), C(libyaml-devel).
+ required: true
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - You can use other states like C(latest) ONLY if they are supported by the underlying package module(s) executed.
+ required: true
+ use:
+ description:
+ - The required package manager module to use (C(yum), C(apt), and so on). The default 'auto' will use existing facts or try to autodetect it.
+ - You should only use this field if the automatic selection is not working for some reason.
+ default: auto
+requirements:
+ - Whatever is required for the package plugins specific to each system.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: full
+ bypass_host_loop:
+ support: none
+ check_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ diff_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ platform:
+ details: The support depends on the availability for the specific plugin for each platform and if fact gathering is able to detect it
+ platforms: all
+notes:
+ - While C(package) abstracts package managers to ease dealing with multiple distributions, package name often differs for the same software.
+
+'''
+EXAMPLES = '''
+- name: Install ntpdate
+ ansible.builtin.package:
+ name: ntpdate
+ state: present
+
+# This uses a variable as this changes per distribution.
+- name: Remove the apache package
+ ansible.builtin.package:
+ name: "{{ apache }}"
+ state: absent
+
+- name: Install the latest version of Apache and MariaDB
+ ansible.builtin.package:
+ name:
+ - httpd
+ - mariadb-server
+ state: latest
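+
+# Forcing a specific backend with the 'use' option documented above
+- name: Install ntpdate, explicitly via apt
+ ansible.builtin.package:
+ name: ntpdate
+ state: present
+ use: apt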
+'''
diff --git a/lib/ansible/modules/package_facts.py b/lib/ansible/modules/package_facts.py
new file mode 100644
index 0000000..57c1d3e
--- /dev/null
+++ b/lib/ansible/modules/package_facts.py
@@ -0,0 +1,552 @@
+# (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# most of it copied from AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: package_facts
+short_description: Package information as facts
+description:
+ - Return information about installed packages as facts.
+options:
+ manager:
+ description:
+ - The package manager used by the system so we can query the package information.
+ - Since 2.8 this is a list and can support multiple package managers per system.
+ - The 'portage' and 'pkg' options were added in version 2.8.
+ - The 'apk' option was added in version 2.11.
+ - The 'pkg_info' option was added in version 2.13.
+ default: ['auto']
+ choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman', 'apk', 'pkg_info']
+ type: list
+ elements: str
+ strategy:
+ description:
+ - This option controls how the module queries the package managers on the system.
+ C(first) means it will return only information for the first supported package manager available.
+ C(all) will return information for all supported and available package managers on the system.
+ choices: ['first', 'all']
+ default: 'first'
+ type: str
+ version_added: "2.8"
+version_added: "2.5"
+requirements:
+ - For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
+ - For Debian-based systems the C(python-apt) package must be installed on targeted hosts.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix
+'''
+
+EXAMPLES = '''
+- name: Gather the package facts
+ ansible.builtin.package_facts:
+ manager: auto
+
+- name: Print the package facts
+ ansible.builtin.debug:
+ var: ansible_facts.packages
+
+- name: Check whether a package called foobar is installed
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
+ when: "'foobar' in ansible_facts.packages"
+
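+# Query every supported and available package manager instead of only the first
+- name: Gather package facts from all available package managers
+ ansible.builtin.package_facts:
+ strategy: all
+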
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Facts to add to ansible_facts.
+ returned: always
+ type: complex
+ contains:
+ packages:
+ description:
+ - Maps the package name to a non-empty list of dicts with package information.
+ - Every dict in the list corresponds to one installed version of the package.
+ - The fields described below are present for all package managers. Depending on the
+ package manager, there might be more fields for a package.
+ returned: when an operating system level package manager is specified or auto-detected
+ type: dict
+ contains:
+ name:
+ description: The package's name.
+ returned: always
+ type: str
+ version:
+ description: The package's version.
+ returned: always
+ type: str
+ source:
+ description: Where information on the package came from.
+ returned: always
+ type: str
+ sample: |-
+ {
+ "packages": {
+ "kernel": [
+ {
+ "name": "kernel",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ },
+ {
+ "name": "kernel",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ },
+ ...
+ ],
+ "kernel-tools": [
+ {
+ "name": "kernel-tools",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ }
+ ],
+ ...
+ }
+ }
+ # Sample rpm
+ {
+ "packages": {
+ "kernel": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.26.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.16.1.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.10.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.21.1.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ "kernel-tools": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel-tools",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ "kernel-tools-libs": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel-tools-libs",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ }
+ }
+ # Sample deb
+ {
+ "packages": {
+ "libbz2-1.0": [
+ {
+ "version": "1.0.6-5",
+ "source": "apt",
+ "arch": "amd64",
+ "name": "libbz2-1.0"
+ }
+ ],
+ "patch": [
+ {
+ "version": "2.7.1-4ubuntu1",
+ "source": "apt",
+ "arch": "amd64",
+ "name": "patch"
+ }
+ ],
+ }
+ }
+ # Sample pkg_info
+ {
+ "packages": {
+ "curl": [
+ {
+ "name": "curl",
+ "source": "pkg_info",
+ "version": "7.79.0"
+ }
+ ],
+ "intel-firmware": [
+ {
+ "name": "intel-firmware",
+ "source": "pkg_info",
+ "version": "20210608v0"
+ }
+ ],
+ }
+ }
+'''
+
+import re
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
+
+
+class RPM(LibMgr):
+
+ LIB = 'rpm'
+
+ def list_installed(self):
+ return self._lib.TransactionSet().dbMatch()
+
+ def get_package_details(self, package):
+ return dict(name=package[self._lib.RPMTAG_NAME],
+ version=package[self._lib.RPMTAG_VERSION],
+ release=package[self._lib.RPMTAG_RELEASE],
+ epoch=package[self._lib.RPMTAG_EPOCH],
+ arch=package[self._lib.RPMTAG_ARCH],)
+
+ def is_available(self):
+ '''we expect the python bindings to be installed, but this warns if they are missing and the rpm CLI is present'''
+ we_have_lib = super(RPM, self).is_available()
+
+ try:
+ get_bin_path('rpm')
+
+ if not we_have_lib and not has_respawned():
+ # try to locate an interpreter with the necessary lib
+ interpreters = ['/usr/libexec/platform-python',
+ '/usr/bin/python3',
+ '/usr/bin/python2']
+ interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # end of the line for this process; this module will exit when the respawned copy completes
+
+ if not we_have_lib:
+ module.warn('Found "rpm" but %s' % (missing_required_lib(self.LIB)))
+ except ValueError:
+ pass
+
+ return we_have_lib
+
+
+class APT(LibMgr):
+
+ LIB = 'apt'
+
+ def __init__(self):
+ self._cache = None
+ super(APT, self).__init__()
+
+ @property
+ def pkg_cache(self):
+ if self._cache is not None:
+ return self._cache
+
+ self._cache = self._lib.Cache()
+ return self._cache
+
+ def is_available(self):
+ '''we expect the python bindings to be installed, but if apt/apt-get/aptitude is present, warn about the missing bindings'''
+ we_have_lib = super(APT, self).is_available()
+ if not we_have_lib:
+ for exe in ('apt', 'apt-get', 'aptitude'):
+ try:
+ get_bin_path(exe)
+ except ValueError:
+ continue
+ else:
+ if not has_respawned():
+ # try to locate an interpreter with the necessary lib
+ interpreters = ['/usr/bin/python3',
+ '/usr/bin/python2']
+ interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # end of the line for this process; this module will exit here when respawned copy completes
+
+ module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
+ break
+
+ return we_have_lib
+
+ def list_installed(self):
+ # Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
+ cache = self.pkg_cache
+ return [pk for pk in cache.keys() if cache[pk].is_installed]
+
+ def get_package_details(self, package):
+ ac_pkg = self.pkg_cache[package].installed
+ return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
+
+
+class PACMAN(CLIMgr):
+
+ CLI = 'pacman'
+
+ def list_installed(self):
+ locale = get_best_parsable_locale(module)
+ rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL=locale))
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.split("\n\n")[:-1]
+
+ def get_package_details(self, package):
+ # parse values of details that might extend over several lines
+ raw_pkg_details = {}
+ last_detail = None
+ for line in package.splitlines():
+ m = re.match(r"([\w ]*[\w]) +: (.*)", line)
+ if m:
+ last_detail = m.group(1)
+ raw_pkg_details[last_detail] = m.group(2)
+ else:
+ # append value to previous detail
+ raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()
+
+ provides = None
+ if raw_pkg_details['Provides'] != 'None':
+ provides = [
+ p.split('=')[0]
+ for p in raw_pkg_details['Provides'].split(' ')
+ ]
+
+ return {
+ 'name': raw_pkg_details['Name'],
+ 'version': raw_pkg_details['Version'],
+ 'arch': raw_pkg_details['Architecture'],
+ 'provides': provides,
+ }
+
+
+class PKG(CLIMgr):
+
+ CLI = 'pkg'
+ atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
+
+ def list_installed(self):
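+ # Build a tab-separated 'pkg query' format string: %n\t%v\t%R\t%t\t%a\t%q\t%o\t%p\t%V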
+ rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+
+ pkg = dict(zip(self.atoms, package.split('\t')))
+
+ if 'arch' in pkg:
+ try:
+ pkg['arch'] = pkg['arch'].split(':')[2]
+ except IndexError:
+ pass
+
+ if 'automatic' in pkg:
+ pkg['automatic'] = bool(int(pkg['automatic']))
+
+ if 'category' in pkg:
+ pkg['category'] = pkg['category'].split('/', 1)[0]
+
+ if 'version' in pkg:
+ if ',' in pkg['version']:
+ pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
+ else:
+ pkg['port_epoch'] = 0
+
+ if '_' in pkg['version']:
+ pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
+ else:
+ pkg['revision'] = '0'
+
+ if 'vital' in pkg:
+ pkg['vital'] = bool(int(pkg['vital']))
+
+ return pkg
+
+
+class PORTAGE(CLIMgr):
+
+ CLI = 'qlist'
+ atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
+
+ def list_installed(self):
+ rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
+ if rc != 0:
+ raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ return dict(zip(self.atoms, package.split()))
+
+
+class APK(CLIMgr):
+
+ CLI = 'apk'
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, 'info', '-v'])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ raw_pkg_details = {'name': package, 'version': '', 'release': ''}
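+ # 'apk info -v' lines look like 'name-version-release'; split from the right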
+ nvr = package.rsplit('-', 2)
+ try:
+ return {
+ 'name': nvr[0],
+ 'version': nvr[1],
+ 'release': nvr[2],
+ }
+ except IndexError:
+ return raw_pkg_details
+
+
+class PKG_INFO(CLIMgr):
+
+ CLI = 'pkg_info'
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, '-a'])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ raw_pkg_details = {'name': package, 'version': ''}
+ details = package.split(maxsplit=1)[0].rsplit('-', maxsplit=1)
+
+ try:
+ return {
+ 'name': details[0],
+ 'version': details[1],
+ }
+ except IndexError:
+ return raw_pkg_details
+
+
+def main():
+
+ # get supported pkg managers
+ PKG_MANAGERS = get_all_pkg_managers()
+ PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
+
+ # start work
+ global module
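+ # 'module' is intentionally global: the manager classes above call
+ # module.run_command() and module.warn() through it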
+ module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
+ strategy={'choices': ['first', 'all'], 'default': 'first'}),
+ supports_check_mode=True)
+ packages = {}
+ results = {'ansible_facts': {}}
+ managers = [x.lower() for x in module.params['manager']]
+ strategy = module.params['strategy']
+
+ if 'auto' in managers:
+ # keep order from user, we do dedupe below
+ managers.extend(PKG_MANAGER_NAMES)
+ managers.remove('auto')
+
+ unsupported = set(managers).difference(PKG_MANAGER_NAMES)
+ if unsupported:
+ if 'auto' in module.params['manager']:
+ msg = 'Could not auto-detect a usable package manager; check warnings for details.'
+ else:
+ msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
+ module.fail_json(msg=msg)
+
+ found = 0
+ seen = set()
+ for pkgmgr in managers:
+
+ if found and strategy == 'first':
+ break
+
+ # dedupe as per above
+ if pkgmgr in seen:
+ continue
+ seen.add(pkgmgr)
+ try:
+ try:
+ # manager throws exception on init (calls self.test) if not usable.
+ manager = PKG_MANAGERS[pkgmgr]()
+ if manager.is_available():
+ found += 1
+ packages.update(manager.get_packages())
+
+ except Exception as e:
+ if pkgmgr in module.params['manager']:
+ module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
+ continue
+
+ except Exception as e:
+ if pkgmgr in module.params['manager']:
+ module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
+
+ if found == 0:
+ msg = ('Could not detect a supported package manager from the following list: %s, '
+ 'or the required Python library is not installed. Check warnings for details.' % managers)
+ module.fail_json(msg=msg)
+
+ # Set the facts, this will override the facts in ansible_facts that might exist from previous runs
+ # when using operating system level or distribution package managers
+ results['ansible_facts']['packages'] = packages
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/pause.py b/lib/ansible/modules/pause.py
new file mode 100644
index 0000000..09061dd
--- /dev/null
+++ b/lib/ansible/modules/pause.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pause
+short_description: Pause playbook execution
+description:
+ - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged.
+ All parameters are optional. The default behavior is to pause with a prompt.
+ - To pause/wait/sleep per host, use the M(ansible.builtin.wait_for) module.
+ - You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely.
+ To continue early press C(ctrl+c) and then C(c). To abort a playbook press C(ctrl+c) and then C(a).
+ - The pause module integrates into async/parallelized playbooks without any special considerations (see Rolling Updates).
+ When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts.
+ - This module is also supported for Windows targets.
+version_added: "0.8"
+options:
+ minutes:
+ description:
+ - A positive number of minutes to pause for.
+ seconds:
+ description:
+ - A positive number of seconds to pause for.
+ prompt:
+ description:
+ - Optional text to use for the prompt message.
+ echo:
+ description:
+ - Controls whether or not keyboard input is shown when typing.
+ - Has no effect if I(seconds) or I(minutes) is set.
+ type: bool
+ default: 'yes'
+ version_added: 2.5
+author: "Tim Bielawa (@tbielawa)"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: full
+ check_mode:
+ support: full
+ connection:
+ support: none
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+ - Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second; previously it would wait indefinitely.
+ - User input is not captured or echoed, regardless of the I(echo) setting, when I(minutes) or I(seconds) is specified.
+'''
+
+EXAMPLES = '''
+- name: Pause for 5 minutes to build app cache
+ ansible.builtin.pause:
+ minutes: 5
+
+- name: Pause until you can verify updates to an application were successful
+ ansible.builtin.pause:
+
+- name: A helpful reminder of what to look out for post-update
+ ansible.builtin.pause:
+ prompt: "Make sure org.foo.FooOverload exception is not present"
+
+- name: Pause to get some sensitive input
+ ansible.builtin.pause:
+ prompt: "Enter a secret"
+ echo: no
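+
+# Pausing for a fixed number of seconds works the same way (illustrative example):
+- name: Pause for 30 seconds to let a service settle
+ ansible.builtin.pause:
+ seconds: 30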
+'''
+
+RETURN = '''
+user_input:
+ description: User input from interactive console
+ returned: if no waiting time set
+ type: str
+ sample: Example user input
+start:
+ description: Time when started pausing
+ returned: always
+ type: str
+ sample: "2017-02-23 14:35:07.298862"
+stop:
+ description: Time when ended pausing
+ returned: always
+ type: str
+ sample: "2017-02-23 14:35:09.552594"
+delta:
+ description: Time paused in seconds
+ returned: always
+ type: str
+ sample: 2
+stdout:
+ description: Output of pause module
+ returned: always
+ type: str
+ sample: Paused for 0.04 minutes
+echo:
+ description: Value of echo setting
+ returned: always
+ type: bool
+ sample: true
+'''
diff --git a/lib/ansible/modules/ping.py b/lib/ansible/modules/ping.py
new file mode 100644
index 0000000..f6267a8
--- /dev/null
+++ b/lib/ansible/modules/ping.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success
+description:
+ - A trivial test module, this module always returns C(pong) on successful
+ contact. It does not make sense in playbooks, but it is useful from
+ C(/usr/bin/ansible) to verify the ability to log in and that a usable Python is configured.
+ - This is NOT ICMP ping; it is just a trivial test module that requires Python on the remote node.
+ - For Windows targets, use the M(ansible.windows.win_ping) module instead.
+ - For Network targets, use the M(ansible.netcommon.net_ping) module instead.
+options:
+ data:
+ description:
+ - Data to return for the C(ping) return value.
+ - If this parameter is set to C(crash), the module will cause an exception.
+ type: str
+ default: pong
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+ - module: ansible.netcommon.net_ping
+ - module: ansible.windows.win_ping
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Test we can log on to 'webservers' and execute Python with the json lib.
+# ansible webservers -m ansible.builtin.ping
+
+- name: Example from an Ansible Playbook
+ ansible.builtin.ping:
+
+- name: Induce an exception to see what happens
+ ansible.builtin.ping:
+ data: crash
+'''
+
+RETURN = '''
+ping:
+ description: Value provided with the data parameter.
+ returned: success
+ type: str
+ sample: pong
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(type='str', default='pong'),
+ ),
+ supports_check_mode=True
+ )
+
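+ # 'crash' is a deliberate test hook: raising an exception here exercises Ansible's module failure handling.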
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+
+ result = dict(
+ ping=module.params['data'],
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/pip.py b/lib/ansible/modules/pip.py
new file mode 100644
index 0000000..a9930cc
--- /dev/null
+++ b/lib/ansible/modules/pip.py
@@ -0,0 +1,832 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pip
+short_description: Manages Python library dependencies
+description:
+ - "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
+ or C(requirements)."
+version_added: "0.7"
+options:
+ name:
+ description:
+ - The name of a Python library to install or the URL (bzr+, hg+, git+, svn+) of the remote package.
+ - This can be a list (since 2.2) and contain version specifiers (since 2.7).
+ type: list
+ elements: str
+ version:
+ description:
+ - The version number to install of the Python library specified in the I(name) parameter.
+ type: str
+ requirements:
+ description:
+ - The path to a pip requirements file, which should be local to the remote system.
+ The file can be specified as a relative path if using the I(chdir) option.
+ type: str
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) directory to install into.
+ It cannot be specified together with the 'executable' parameter
+ (added in 2.1).
+ If the virtualenv does not exist, it will be created before installing
+ packages. The optional virtualenv_site_packages, virtualenv_command,
+ and virtualenv_python options affect the creation of the virtualenv.
+ type: path
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect; the environment must be deleted and newly
+ created.
+ type: bool
+ default: "no"
+ version_added: "1.0"
+ virtualenv_command:
+ description:
+ - The command or a pathname to the command to create the virtual
+ environment with. For example C(pyvenv), C(virtualenv),
+ C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
+ type: path
+ default: virtualenv
+ version_added: "1.1"
+ virtualenv_python:
+ description:
+ - The Python executable used for creating the virtual environment.
+ For example C(python3.5), C(python2.7). When not specified, the
+ Python version used to run the ansible module is used. This parameter
+ should not be used when C(virtualenv_command) is using C(pyvenv) or
+ the C(-m venv) module.
+ type: str
+ version_added: "2.0"
+ state:
+ description:
+ - The state of the module.
+ - The C(forcereinstall) option is only available in Ansible 2.1 and above.
+ type: str
+ choices: [ absent, forcereinstall, latest, present ]
+ default: present
+ extra_args:
+ description:
+ - Extra arguments passed to pip.
+ type: str
+ version_added: "1.0"
+ editable:
+ description:
+ - Pass the editable flag.
+ type: bool
+ default: 'no'
+ version_added: "2.0"
+ chdir:
+ description:
+ - cd into this directory before running the command.
+ type: path
+ version_added: "1.3"
+ executable:
+ description:
+ - The explicit executable or pathname for the pip executable,
+ if different from the Ansible Python interpreter. For
+ example C(pip3.3), if there are both Python 2.7 and 3.3 installations
+ in the system and you want to run pip for the Python 3.3 installation.
+ - Mutually exclusive with I(virtualenv) (added in 2.1).
+ - Does not affect the Ansible Python interpreter.
+ - The setuptools package must be installed for both the Ansible Python interpreter
+ and for the version of Python specified by this option.
+ type: path
+ version_added: "1.3"
+ umask:
+ description:
+ - The system umask to apply before installing the pip package. This is
+ useful, for example, when installing on systems that have a very
+ restrictive umask by default (e.g., "0077") and you want to pip install
+ packages which are to be used by all users. Note that this requires you
+ to specify the desired umask mode as an octal string (e.g., "0022").
+ type: str
+ version_added: "2.1"
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - The virtualenv (U(http://www.virtualenv.org/)) must be
+ installed on the remote host if the virtualenv parameter is specified and
+ the virtualenv needs to be created.
+ - Although it executes using the Ansible Python interpreter, the pip module shells out to
+ run the actual pip command, so it can use any pip version you specify with I(executable).
+ By default, it uses the pip version for the Ansible Python interpreter. For example, pip3 on python 3, and pip2 or pip on python 2.
+ - The interpreter used by Ansible
+ (see R(ansible_python_interpreter, ansible_python_interpreter))
+ requires the setuptools package, regardless of the version of pip set with
+ the I(executable) option.
+requirements:
+- pip
+- virtualenv
+- setuptools
+author:
+- Matt Wright (@mattupstate)
+'''
+
+EXAMPLES = '''
+- name: Install bottle python package
+ ansible.builtin.pip:
+ name: bottle
+
+- name: Install bottle python package on version 0.11
+ ansible.builtin.pip:
+ name: bottle==0.11
+
+- name: Install bottle python package with version specifiers
+ ansible.builtin.pip:
+ name: bottle>0.10,<0.20,!=0.11
+
+- name: Install multi python packages with version specifiers
+ ansible.builtin.pip:
+ name:
+ - django>1.11.0,<1.12.0
+ - bottle>0.10,<0.20,!=0.11
+
+- name: Install python package using a proxy
+ ansible.builtin.pip:
+ name: six
+ environment:
+ http_proxy: 'http://127.0.0.1:8080'
+ https_proxy: 'https://127.0.0.1:8080'
+
+# You do not have to supply '-e' option in extra_args
+- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+,svn+)
+ ansible.builtin.pip:
+ name: svn+http://myrepo/svn/MyApp#egg=MyApp
+
+- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+)
+ ansible.builtin.pip:
+ name: git+http://myrepo/app/MyApp
+
+- name: Install MyApp from local tarball
+ ansible.builtin.pip:
+ name: file:///path/to/MyApp.tar.gz
+
+- name: Install bottle into the specified (virtualenv), inheriting none of the globally installed modules
+ ansible.builtin.pip:
+ name: bottle
+ virtualenv: /my_app/venv
+
+- name: Install bottle into the specified (virtualenv), inheriting globally installed modules
+ ansible.builtin.pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_site_packages: yes
+
+- name: Install bottle into the specified (virtualenv), using Python 2.7
+ ansible.builtin.pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_command: virtualenv-2.7
+
+- name: Install bottle within a user home directory
+ ansible.builtin.pip:
+ name: bottle
+ extra_args: --user
+
+- name: Install specified python requirements
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+
+- name: Install specified python requirements in indicated (virtualenv)
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+ virtualenv: /my_app/venv
+
+- name: Install specified python requirements and custom Index URL
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+ extra_args: -i https://example.com/pypi/simple
+
+- name: Install specified python requirements offline from a local directory with downloaded packages
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+ extra_args: "--no-index --find-links=file:///my_downloaded_packages_dir"
+
+- name: Install bottle for Python 3.3 specifically, using the 'pip3.3' executable
+ ansible.builtin.pip:
+ name: bottle
+ executable: pip3.3
+
+- name: Install bottle, forcing reinstallation if it's already installed
+ ansible.builtin.pip:
+ name: bottle
+ state: forcereinstall
+
+- name: Install bottle while ensuring the umask is 0022 (to ensure other users can use it)
+ ansible.builtin.pip:
+ name: bottle
+ umask: "0022"
+ become: True
+'''
+
+RETURN = '''
+cmd:
+ description: pip command used by the module
+ returned: success
+ type: str
+ sample: pip2 install ansible six
+name:
+ description: list of python modules targeted by pip
+ returned: success
+ type: list
+ sample: ['ansible', 'six']
+requirements:
+ description: Path to the requirements file
+ returned: success, if a requirements file was provided
+ type: str
+ sample: "/srv/git/project/requirements.txt"
+version:
+ description: Version of the package specified in 'name'
+ returned: success, if a name and version were provided
+ type: str
+ sample: "2.5.1"
+virtualenv:
+ description: Path to the virtualenv
+ returned: success, if a virtualenv path was provided
+ type: str
+ sample: "/tmp/virtualenv"
+'''
+
+import os
+import re
+import sys
+import tempfile
+import operator
+import shlex
+import traceback
+import types
+
+from ansible.module_utils.compat.version import LooseVersion
+
+SETUPTOOLS_IMP_ERR = None
+try:
+ from pkg_resources import Requirement
+
+ HAS_SETUPTOOLS = True
+except ImportError:
+ HAS_SETUPTOOLS = False
+ SETUPTOOLS_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule, is_executable, missing_required_lib
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.six import PY3
+
+
+#: Python one-liners to be run at the command line that will determine the
+# installed version for these special libraries. These are libraries that
+# don't end up in the output of pip freeze.
+_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
+ 'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
+
+_VCS_RE = re.compile(r'(svn|git|hg|bzr)\+')
+
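+# Comparison operators for the LooseVersion fallback in Package.is_satisfied_by(); note that
+# PEP 440's compatible-release operator '~=' is only approximated by '>=' in that fallback.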
+op_dict = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
+ "<": operator.lt, "==": operator.eq, "!=": operator.ne, "~=": operator.ge}
+
+
+def _is_vcs_url(name):
+ """Test whether a name is a vcs url or not."""
+ return re.match(_VCS_RE, name)
+
+
+def _is_package_name(name):
+ """Test whether the name is a package name or a version specifier."""
+ return not name.lstrip().startswith(tuple(op_dict.keys()))
+
+
+def _recover_package_name(names):
+ """Recover package names as list from user's raw input.
+
+ :input: a mixed and invalid list of names or version specifiers
+ :return: a list of valid package name
+
+ eg.
+ input: ['django>1.11.1', '<1.11.3', 'ipaddress', 'simpleproject>1.1.0', '<2.0.0']
+ return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
+
+ input: ['django>1.11.1,<1.11.3,ipaddress', 'simpleproject>1.1.0,<2.0.0']
+ return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
+ """
+ # rebuild input name to a flat list so we can tolerate any combination of input
+ tmp = []
+ for one_line in names:
+ tmp.extend(one_line.split(","))
+ names = tmp
+
+ # reconstruct the names
+ name_parts = []
+ package_names = []
+ in_brackets = False
+ for name in names:
+ if _is_package_name(name) and not in_brackets:
+ if name_parts:
+ package_names.append(",".join(name_parts))
+ name_parts = []
+ if "[" in name:
+ in_brackets = True
+ if in_brackets and "]" in name:
+ in_brackets = False
+ name_parts.append(name)
+ package_names.append(",".join(name_parts))
+ return package_names
+
+
+def _get_cmd_options(module, cmd):
+ thiscmd = cmd + " --help"
+ rc, stdout, stderr = module.run_command(thiscmd)
+ if rc != 0:
+ module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
+
+ words = stdout.strip().split()
+ cmd_options = [x for x in words if x.startswith('--')]
+ return cmd_options
+
+
+def _get_packages(module, pip, chdir):
+ '''Return results of pip command to get packages.'''
+ # Try 'pip list' command first.
+ command = pip + ['list', '--format=freeze']
+ locale = get_best_parsable_locale(module)
+ lang_env = {'LANG': locale, 'LC_ALL': locale, 'LC_MESSAGES': locale}
+ rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
+
+ # If there was an error (pip version too old) then use 'pip freeze'.
+ if rc != 0:
+ command = pip + ['freeze']
+ rc, out, err = module.run_command(command, cwd=chdir)
+ if rc != 0:
+ _fail(module, command, out, err)
+
+ return ' '.join(command), out, err
+
+
+def _is_present(module, req, installed_pkgs, pkg_command):
+ '''Return whether or not package is installed.'''
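+ # Only 'name==version' lines (as produced by pip freeze / pip list --format=freeze) are considered;
+ # entries without '==' (for example editable VCS installs) are skipped.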
+ for pkg in installed_pkgs:
+ if '==' in pkg:
+ pkg_name, pkg_version = pkg.split('==')
+ pkg_name = Package.canonicalize_name(pkg_name)
+ else:
+ continue
+
+ if pkg_name == req.package_name and req.is_satisfied_by(pkg_version):
+ return True
+
+ return False
+
+
+def _get_pip(module, env=None, executable=None):
+ # Older pip only installed under the "/usr/bin/pip" name. Many Linux
+ # distros install it there.
+ # By default, we try to use pip required for the current python
+ # interpreter, so people can use pip to install modules dependencies
+ candidate_pip_basenames = ('pip2', 'pip')
+ if PY3:
+ # pip under python3 installs the "/usr/bin/pip3" name
+ candidate_pip_basenames = ('pip3',)
+
+ pip = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ pip = executable
+ else:
+ # If you define your own executable that executable should be the only candidate.
+ # As noted in the docs, executable doesn't work with virtualenvs.
+ candidate_pip_basenames = (executable,)
+ elif executable is None and env is None and _have_pip_module():
+ # If no executable or virtualenv were specified, use the pip module for the current Python interpreter if available.
+ # Use of `__main__` is required to support Python 2.6 since support for executing packages with `runpy` was added in Python 2.7.
+ # Without it Python 2.6 gives the following error: pip is a package and cannot be directly executed
+ pip = [sys.executable, '-m', 'pip.__main__']
+
+ if pip is None:
+ if env is None:
+ opt_dirs = []
+ for basename in candidate_pip_basenames:
+ pip = module.get_bin_path(basename, False, opt_dirs)
+ if pip is not None:
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find any of %s to use. pip'
+ ' needs to be installed.' % ', '.join(candidate_pip_basenames))
+ else:
+ # If we're using a virtualenv we must use the pip from the
+ # virtualenv
+ venv_dir = os.path.join(env, 'bin')
+ candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
+ for basename in candidate_pip_basenames:
+ candidate = os.path.join(venv_dir, basename)
+ if os.path.exists(candidate) and is_executable(candidate):
+ pip = candidate
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find pip in the virtualenv, %s, ' % env +
+ 'under any of these names: %s. ' % (', '.join(candidate_pip_basenames)) +
+ 'Make sure pip is present in the virtualenv.')
+
+ if not isinstance(pip, list):
+ pip = [pip]
+
+ return pip
+
+
+def _have_pip_module(): # type: () -> bool
+ """Return True if the `pip` module can be found using the current Python interpreter, otherwise return False."""
+ try:
+ from importlib.util import find_spec
+ except ImportError:
+ find_spec = None # type: ignore[assignment] # type: ignore[no-redef]
+
+ if find_spec:
+ # noinspection PyBroadException
+ try:
+ # noinspection PyUnresolvedReferences
+ found = bool(find_spec('pip'))
+ except Exception:
+ found = False
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ # noinspection PyBroadException
+ try:
+ # noinspection PyDeprecation
+ imp.find_module('pip')
+ except Exception:
+ found = False
+ else:
+ found = True
+
+ return found
+
+
+def _fail(module, cmd, out, err):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+ msg += "\n:stderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg)
+
+
+def _get_package_info(module, package, env=None):
+ """This is only needed for special packages which do not show up in pip freeze
+
+ pip and setuptools fall into this category.
+
+ :returns: a string containing the version number if the package is
+ installed. None if the package is not installed.
+ """
+ if env:
+ opt_dirs = ['%s/bin' % env]
+ else:
+ opt_dirs = []
+ python_bin = module.get_bin_path('python', False, opt_dirs)
+
+ if python_bin is None:
+ formatted_dep = None
+ else:
+ rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
+ if rc:
+ formatted_dep = None
+ else:
+ formatted_dep = '%s==%s' % (package, out.strip())
+ return formatted_dep
+
+
+def setup_virtualenv(module, env, chdir, out, err):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = shlex.split(module.params['virtualenv_command'])
+
+ # Find the binary for the command in the PATH
+ # and switch the command for the explicit path.
+ if os.path.basename(cmd[0]) == cmd[0]:
+ cmd[0] = module.get_bin_path(cmd[0], True)
+
+ # Add the system-site-packages option if that
+ # is enabled, otherwise explicitly set the option
+ # to not use system-site-packages if that is an
+ # option provided by the command's help function.
+ if module.params['virtualenv_site_packages']:
+ cmd.append('--system-site-packages')
+ else:
+ cmd_opts = _get_cmd_options(module, cmd[0])
+ if '--no-site-packages' in cmd_opts:
+ cmd.append('--no-site-packages')
+
+ virtualenv_python = module.params['virtualenv_python']
+ # -p is a virtualenv option, not compatible with pyenv or venv
+ # this conditional validates if the command being used is not any of them
+ if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
+ if virtualenv_python:
+ cmd.append('-p%s' % virtualenv_python)
+ elif PY3:
+ # Ubuntu currently has a patch making virtualenv always
+ # try to use python2. Since Ubuntu16 works without
+ # python2 installed, this is a problem. This code mimics
+ # the upstream behaviour of using the python which invoked
+ # virtualenv to determine which python is used inside of
+ # the virtualenv (when none are specified).
+ cmd.append('-p%s' % sys.executable)
+
+ # if venv or pyvenv are used and virtualenv_python is defined, then
+ # virtualenv_python is ignored, this has to be acknowledged
+ elif module.params['virtualenv_python']:
+ module.fail_json(
+ msg='virtualenv_python should not be used when'
+ ' using the venv module or pyvenv as virtualenv_command'
+ )
+
+ cmd.append(env)
+ rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
+ out += out_venv
+ err += err_venv
+ if rc != 0:
+ _fail(module, cmd, out, err)
+ return out, err
+
+
+class Package:
+ """Python distribution package metadata wrapper.
+
+ A wrapper class for Requirement, which provides
+ API to parse package name, version specifier,
+ test whether a package is already satisfied.
+ """
+
+ _CANONICALIZE_RE = re.compile(r'[-_.]+')
+
+ def __init__(self, name_string, version_string=None):
+ self._plain_package = False
+ self.package_name = name_string
+ self._requirement = None
+
+ if version_string:
+ version_string = version_string.lstrip()
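+ # A bare version number ("1.2.3") becomes an exact '==' pin; a string that already
+ # starts with an operator (">=1.0") is joined with a space and parsed as a specifier.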
+ separator = '==' if version_string[0].isdigit() else ' '
+ name_string = separator.join((name_string, version_string))
+ try:
+ self._requirement = Requirement.parse(name_string)
+ # old pkg_resource will replace 'setuptools' with 'distribute' when it's already installed
+ if self._requirement.project_name == "distribute" and "setuptools" in name_string:
+ self.package_name = "setuptools"
+ self._requirement.project_name = "setuptools"
+ else:
+ self.package_name = Package.canonicalize_name(self._requirement.project_name)
+ self._plain_package = True
+ except ValueError:
+ pass
+
+ @property
+ def has_version_specifier(self):
+ if self._plain_package:
+ return bool(self._requirement.specs)
+ return False
+
+ def is_satisfied_by(self, version_to_test):
+ if not self._plain_package:
+ return False
+ try:
+ return self._requirement.specifier.contains(version_to_test, prereleases=True)
+ except AttributeError:
+ # old setuptools has no specifier, do fallback
+ version_to_test = LooseVersion(version_to_test)
+ return all(
+ op_dict[op](version_to_test, LooseVersion(ver))
+ for op, ver in self._requirement.specs
+ )
+
+ @staticmethod
+ def canonicalize_name(name):
+ # This is taken from PEP 503.
+ return Package._CANONICALIZE_RE.sub("-", name).lower()
+
+ def __str__(self):
+ if self._plain_package:
+ return to_native(self._requirement)
+ return self.package_name
+
+
+def main():
+ state_map = dict(
+ present=['install'],
+ absent=['uninstall', '-y'],
+ latest=['install', '-U'],
+ forcereinstall=['install', '-U', '--force-reinstall'],
+ )
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=list(state_map.keys())),
+ name=dict(type='list', elements='str'),
+ version=dict(type='str'),
+ requirements=dict(type='str'),
+ virtualenv=dict(type='path'),
+ virtualenv_site_packages=dict(type='bool', default=False),
+ virtualenv_command=dict(type='path', default='virtualenv'),
+ virtualenv_python=dict(type='str'),
+ extra_args=dict(type='str'),
+ editable=dict(type='bool', default=False),
+ chdir=dict(type='path'),
+ executable=dict(type='path'),
+ umask=dict(type='str'),
+ ),
+ required_one_of=[['name', 'requirements']],
+ mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
+ supports_check_mode=True,
+ )
+
+ if not HAS_SETUPTOOLS:
+ module.fail_json(msg=missing_required_lib("setuptools"),
+ exception=SETUPTOOLS_IMP_ERR)
+
+ state = module.params['state']
+ name = module.params['name']
+ version = module.params['version']
+ requirements = module.params['requirements']
+ extra_args = module.params['extra_args']
+ chdir = module.params['chdir']
+ umask = module.params['umask']
+ env = module.params['virtualenv']
+
+ venv_created = False
+ if env and chdir:
+ env = os.path.join(chdir, env)
+
+ if umask and not isinstance(umask, int):
+ try:
+ umask = int(umask, 8)
+ except Exception:
+ module.fail_json(msg="umask must be an octal integer",
+ details=to_native(sys.exc_info()[1]))
+
+ old_umask = None
+ if umask is not None:
+ old_umask = os.umask(umask)
+ try:
+ if state == 'latest' and version is not None:
+ module.fail_json(msg='version is incompatible with state=latest')
+
+ if chdir is None:
+ # this is done to avoid permissions issues with privilege escalation and virtualenvs
+ chdir = tempfile.gettempdir()
+
+ err = ''
+ out = ''
+
+ if env:
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ venv_created = True
+ out, err = setup_virtualenv(module, env, chdir, out, err)
+
+ pip = _get_pip(module, env, module.params['executable'])
+
+ cmd = pip + state_map[state]
+
+ # If there's a virtualenv we want things we install to be able to use other
+ # installations that exist as binaries within this virtualenv. Example: we
+ # install cython and then gevent -- gevent needs to use the cython binary,
+ # not just a python package that will be found by calling the right python.
+ # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
+ # in run_command by setting path_prefix here.
+ path_prefix = None
+ if env:
+ path_prefix = os.path.join(env, 'bin')
+
+ # Automatically apply -e option to extra_args when source is a VCS url. VCS
+ # includes those beginning with svn+, git+, hg+ or bzr+
+ has_vcs = False
+ if name:
+ for pkg in name:
+ if pkg and _is_vcs_url(pkg):
+ has_vcs = True
+ break
+
+ # convert raw input package names to Package instances
+ packages = [Package(pkg) for pkg in _recover_package_name(name)]
+ # check invalid combination of arguments
+ if version is not None:
+ if len(packages) > 1:
+ module.fail_json(
+ msg="'version' argument is ambiguous when installing multiple package distributions. "
+ "Please specify version restrictions next to each package in 'name' argument."
+ )
+ if packages[0].has_version_specifier:
+ module.fail_json(
+ msg="The 'version' argument conflicts with any version specifier provided along with a package name. "
+ "Please keep the version specifier, but remove the 'version' argument."
+ )
+ # if the version specifier is provided by version, append that into the package
+ packages[0] = Package(to_native(packages[0]), version)
+
+ if module.params['editable']:
+ args_list = [] # used if extra_args is not used at all
+ if extra_args:
+ args_list = extra_args.split(' ')
+ if '-e' not in args_list:
+ args_list.append('-e')
+ # Ok, we will reconstruct the option string
+ extra_args = ' '.join(args_list)
+
+ if extra_args:
+ cmd.extend(shlex.split(extra_args))
+
+ if name:
+ cmd.extend(to_native(p) for p in packages)
+ elif requirements:
+ cmd.extend(['-r', requirements])
+ else:
+ module.exit_json(
+ changed=False,
+ warnings=["No valid name or requirements file found."],
+ )
+
+ if module.check_mode:
+ if extra_args or requirements or state == 'latest' or not name:
+ module.exit_json(changed=True)
+
+ pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
+
+ out += out_pip
+ err += err_pip
+
+ changed = False
+ if name:
+ pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
+
+ if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
+ # Older versions of pip (pre-1.3) do not have pip list.
+ # pip freeze does not list setuptools or pip in its output
+ # So we need to get those via a specialcase
+ for pkg in ('setuptools', 'pip'):
+ if pkg in name:
+ formatted_dep = _get_package_info(module, pkg, env)
+ if formatted_dep is not None:
+ pkg_list.append(formatted_dep)
+ out += '%s\n' % formatted_dep
+
+ for package in packages:
+ is_present = _is_present(module, package, pkg_list, pkg_cmd)
+ if (state == 'present' and not is_present) or (state == 'absent' and is_present):
+ changed = True
+ break
+ module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
+
+ out_freeze_before = None
+ if requirements or has_vcs:
+ _, out_freeze_before, _ = _get_packages(module, pip, chdir)
+
+ rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
+ out += out_pip
+ err += err_pip
+ if rc == 1 and state == 'absent' and \
+ ('not installed' in out_pip or 'not installed' in err_pip):
+ pass # rc is 1 when attempting to uninstall non-installed package
+ elif rc != 0:
+ _fail(module, cmd, out, err)
+
+ if state == 'absent':
+ changed = 'Successfully uninstalled' in out_pip
+ else:
+ if out_freeze_before is None:
+ changed = 'Successfully installed' in out_pip
+ else:
+ _, out_freeze_after, _ = _get_packages(module, pip, chdir)
+ changed = out_freeze_before != out_freeze_after
+
+ changed = changed or venv_created
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
+ state=state, requirements=requirements, virtualenv=env,
+ stdout=out, stderr=err)
+ finally:
+ if old_umask is not None:
+ os.umask(old_umask)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/raw.py b/lib/ansible/modules/raw.py
new file mode 100644
index 0000000..dc40a73
--- /dev/null
+++ b/lib/ansible/modules/raw.py
@@ -0,0 +1,88 @@
+# This is a virtual module that is entirely implemented server side
+
+# Copyright: (c) 2012, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: raw
+short_description: Executes a low-down and dirty command
+version_added: historical
+options:
+ free_form:
+ description:
+ - The raw module takes a free form command to run.
+ - There is no parameter actually named 'free form'; see the examples!
+ required: true
+ executable:
+ description:
+ - Change the shell used to execute the command. Should be an absolute path to the executable.
+ - When using privilege escalation (C(become)) a default shell will be assigned if one is not provided,
+ as privilege escalation requires a shell.
+ version_added: "1.0"
+description:
+ - Executes a low-down and dirty SSH command, not going through the module
+ subsystem.
+ - This is useful and should only be done in a few cases. A common
+ case is installing C(python) on a system without python installed by default.
+ Another is speaking to any devices such as
+ routers that do not have any Python installed. In any other case, using
+ the M(ansible.builtin.shell) or M(ansible.builtin.command) module is much more appropriate.
+ - Arguments given to C(raw) are run directly through the configured remote shell.
+ - Standard output, error output and return code are returned when
+ available.
+ - There is no change handler support for this module.
+ - This module does not require python on the remote system, much like
+ the M(ansible.builtin.script) module.
+ - This module is also supported for Windows targets.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.raw
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ details: This action is one of the few that requires no Python on the remote as it passes the command directly into the connection string
+ platforms: all
+ raw:
+ support: full
+notes:
+ - "If using raw from a playbook, you may need to disable fact gathering
+ using C(gather_facts: no) if you're using C(raw) to bootstrap python
+ onto the machine."
+ - If you want to execute a command securely and predictably, it may be
+ better to use the M(ansible.builtin.command) or M(ansible.builtin.shell) modules instead.
+ - The C(environment) keyword does not work with raw normally; it requires a shell,
+ which means it only works if C(executable) is set or using the module
+ with privilege escalation (C(become)).
+seealso:
+- module: ansible.builtin.command
+- module: ansible.builtin.shell
+- module: ansible.windows.win_command
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Bootstrap a host without python2 installed
+ ansible.builtin.raw: dnf install -y python2 python2-dnf libselinux-python
+
+- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
+ ansible.builtin.raw: cat < /tmp/*txt
+ args:
+ executable: /bin/bash
+
+- name: Safely use templated variables. Always use quote filter to avoid injection issues.
+ ansible.builtin.raw: "{{ package_mgr|quote }} {{ pkg_flags|quote }} install {{ python|quote }}"
+
+- name: List user accounts on a Windows system
+ ansible.builtin.raw: Get-WmiObject -Class Win32_UserAccount
+'''
diff --git a/lib/ansible/modules/reboot.py b/lib/ansible/modules/reboot.py
new file mode 100644
index 0000000..71e6294
--- /dev/null
+++ b/lib/ansible/modules/reboot.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: reboot
+short_description: Reboot a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use C(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+ - Reboot a machine, wait for it to go down, come back up, and respond to commands.
+ - For Windows targets, use the M(ansible.windows.win_reboot) module instead.
+version_added: "2.7"
+options:
+ pre_reboot_delay:
+ description:
+ - Seconds to wait before reboot. Passed as a parameter to the reboot command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ post_reboot_delay:
+ description:
+ - Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
+ - This is useful if you want to wait for something to settle despite your connection already working.
+ type: int
+ default: 0
+ reboot_timeout:
+ description:
+ - Maximum seconds to wait for machine to reboot and respond to a test command.
+ - This timeout is evaluated separately for both reboot verification and test command success, so the
+ maximum execution time for the module is twice this amount.
+ type: int
+ default: 600
+ connect_timeout:
+ description:
+ - Maximum seconds to wait for a successful connection to the managed hosts before trying again.
+ - If unspecified, the default setting for the underlying connection plugin is used.
+ type: int
+ test_command:
+ description:
+ - Command to run on the rebooted host and expect success from to determine the machine is ready for
+ further tasks.
+ type: str
+ default: whoami
+ msg:
+ description:
+ - Message to display to users before reboot.
+ type: str
+ default: Reboot initiated by Ansible
+
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: str
+ default: ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
+ version_added: '2.8'
+
+ boot_time_command:
+ description:
+ - Command to run that returns a unique string indicating the last time the system was booted.
+ - Setting this to a command that has different output each time it is run will cause the task to fail.
+ type: str
+ default: 'cat /proc/sys/kernel/random/boot_id'
+ version_added: '2.10'
+
+ reboot_command:
+ description:
+ - Command to run that reboots the system, including any parameters passed to the command.
+ - Can be an absolute path to the command or just the command name. If an absolute path to the
+ command is not given, C(search_paths) on the target system will be searched to find the absolute path.
+ - This will cause C(pre_reboot_delay), C(post_reboot_delay), and C(msg) to be ignored.
+ type: str
+ default: '[determined based on target OS]'
+ version_added: '2.11'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.windows.win_reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally reboot the machine with all defaults
+ ansible.builtin.reboot:
+
+- name: Reboot a slow machine that might have lots of updates to apply
+ ansible.builtin.reboot:
+ reboot_timeout: 3600
+
+- name: Reboot a machine with shutdown command in unusual place
+ ansible.builtin.reboot:
+ search_paths:
+ - '/lib/molly-guard'
+
+- name: Reboot machine using a custom reboot command
+ ansible.builtin.reboot:
+ reboot_command: launchctl reboot userspace
+ boot_time_command: uptime | cut -d ' ' -f 5
+
+'''
+
+RETURN = r'''
+rebooted:
+ description: true if the machine was rebooted
+ returned: always
+ type: bool
+ sample: true
+elapsed:
+ description: The number of seconds that elapsed waiting for the system to be rebooted.
+ returned: always
+ type: int
+ sample: 23
+'''
diff --git a/lib/ansible/modules/replace.py b/lib/ansible/modules/replace.py
new file mode 100644
index 0000000..4b8f74f
--- /dev/null
+++ b/lib/ansible/modules/replace.py
@@ -0,0 +1,316 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evan Kaufman <evan@digitalflophouse.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: replace
+author: Evan Kaufman (@EvanK)
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - files
+ - validate
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: none
+short_description: Replace all instances of a particular string in a
+ file using a back-referenced regular expression
+description:
+ - This module will replace all instances of a pattern within a file.
+ - It is up to the user to maintain idempotence by ensuring that the
+ same pattern would never match any replacements made.
+version_added: "1.6"
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: true
+ aliases: [ dest, destfile, name ]
+ regexp:
+ description:
+ - The regular expression to look for in the contents of the file.
+ - Uses Python regular expressions; see
+ U(https://docs.python.org/3/library/re.html).
+ - Uses MULTILINE mode, which means C(^) and C($) match the beginning
+ and end of the file, as well as the beginning and end respectively
+ of I(each line) of the file.
+ - Does not use DOTALL, which means the C(.) special character matches
+ any character I(except newlines). A common mistake is to assume that
+ a negated character set like C([^#]) will also not match newlines.
+ - In order to exclude newlines, they must be added to the set like C([^#\n]).
+ - Note that, as of Ansible 2.0, short form tasks should have any escape
+ sequences backslash-escaped in order to prevent them being parsed
+ as string literal escapes. See the examples.
+ type: str
+ required: true
+ replace:
+ description:
+ - The string to replace regexp matches.
+ - May contain backreferences that will get expanded with the regexp capture groups if the regexp matches.
+ - If not set, matches are removed entirely.
+ - Backreferences can be used ambiguously like C(\1), or explicitly like C(\g<1>).
+ type: str
+ after:
+ description:
+ - If specified, only content after this match will be replaced/removed.
+ - Can be used in combination with C(before).
+ - Uses Python regular expressions; see
+ U(https://docs.python.org/3/library/re.html).
+ - Uses DOTALL, which means the C(.) special character I(can match newlines).
+ type: str
+ version_added: "2.4"
+ before:
+ description:
+ - If specified, only content before this match will be replaced/removed.
+ - Can be used in combination with C(after).
+ - Uses Python regular expressions; see
+ U(https://docs.python.org/3/library/re.html).
+ - Uses DOTALL, which means the C(.) special character I(can match newlines).
+ type: str
+ version_added: "2.4"
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ others:
+ description:
+ - All arguments accepted by the M(ansible.builtin.file) module also work here.
+ type: str
+ encoding:
+ description:
+ - The character encoding for reading and writing the file.
+ type: str
+ default: utf-8
+ version_added: "2.4"
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - As of Ansible 2.7.10, the combined use of I(before) and I(after) works properly. If you were relying on the
+ previous incorrect behavior, you may need to adjust your tasks.
+ See U(https://github.com/ansible/ansible/issues/31354) for details.
+ - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
+'''
+
+EXAMPLES = r'''
+- name: Replace old hostname with new hostname (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/hosts
+ regexp: '(\s+)old\.host\.name(\s+.*)?$'
+ replace: '\1new.host.name\2'
+
+- name: Replace after the expression until the end of the file (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/apache2/sites-available/default.conf
+ after: 'NameVirtualHost [*]'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+- name: Replace before the expression until the beginning of the file (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/apache2/sites-available/default.conf
+ before: '# live site config'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+# Prior to Ansible 2.7.10, using before and after in combination did the opposite of what was intended.
+# see https://github.com/ansible/ansible/issues/31354 for details.
+- name: Replace between the expressions (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/hosts
+ after: '<VirtualHost [*]>'
+ before: '</VirtualHost>'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+- name: Supports common file attributes
+ ansible.builtin.replace:
+ path: /home/jdoe/.ssh/known_hosts
+ regexp: '^old\.host\.name[^\n]*\n'
+ owner: jdoe
+ group: jdoe
+ mode: '0644'
+
+- name: Supports a validate command
+ ansible.builtin.replace:
+ path: /etc/apache/ports
+ regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
+ replace: '\1 127.0.0.1:8080'
+ validate: '/usr/sbin/apache2ctl -f %s -t'
+
+- name: Short form task (in ansible 2+) necessitates backslash-escaped sequences
+ ansible.builtin.replace: path=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
+
+- name: Long form task does not
+ ansible.builtin.replace:
+ path: /etc/hosts
+ regexp: '\b(localhost)(\d*)\b'
+ replace: '\1\2.localdomain\2 \1\2'
+
+- name: Explicitly specifying positional matched groups in replacement
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '^(ListenAddress[ ]+)[^\n]+$'
+ replace: '\g<1>0.0.0.0'
+
+- name: Explicitly specifying named matched groups
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '^(?P<dctv>ListenAddress[ ]+)(?P<host>[^\n]+)$'
+ replace: '#\g<dctv>\g<host>\n\g<dctv>0.0.0.0'
+'''
+
+RETURN = r'''#'''
+
+import os
+import re
+import tempfile
+from traceback import format_exc
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def write_changes(module, contents, path):
+
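+ # Write the new contents to a temp file, validate it if requested, and only then atomically
+ # move it over the original so a failed validation never clobbers the destination file.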
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ regexp=dict(type='str', required=True),
+ replace=dict(type='str', default=''),
+ after=dict(type='str'),
+ before=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ encoding=dict(type='str', default='utf-8'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ path = params['path']
+ encoding = params['encoding']
+ res_args = dict(rc=0)
+
+ params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
+ params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
+ params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
+ params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')
+
+ if os.path.isdir(path):
+ module.fail_json(rc=256, msg='Path %s is a directory!' % path)
+
+ if not os.path.exists(path):
+ module.fail_json(rc=257, msg='Path %s does not exist!' % path)
+ else:
+ try:
+ with open(path, 'rb') as f:
+ contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to read the contents of %s: %s' % (path, to_text(e)),
+ exception=format_exc())
+
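+ # Build an optional outer regex whose named 'subsection' group limits the replacement to the
+ # text after 'after' and/or before 'before'; the matched slice is spliced back in further down.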
+ pattern = u''
+ if params['after'] and params['before']:
+ pattern = u'%s(?P<subsection>.*?)%s' % (params['after'], params['before'])
+ elif params['after']:
+ pattern = u'%s(?P<subsection>.*)' % params['after']
+ elif params['before']:
+ pattern = u'(?P<subsection>.*)%s' % params['before']
+
+ if pattern:
+ section_re = re.compile(pattern, re.DOTALL)
+ match = re.search(section_re, contents)
+ if match:
+ section = match.group('subsection')
+ indices = [match.start('subsection'), match.end('subsection')]
+ else:
+ res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
+ res_args['changed'] = False
+ module.exit_json(**res_args)
+ else:
+ section = contents
+
+ mre = re.compile(params['regexp'], re.MULTILINE)
+ result = re.subn(mre, params['replace'], section, 0)
+
+ if result[1] > 0 and section != result[0]:
+ if pattern:
+ result = (contents[:indices[0]] + result[0] + contents[indices[1]:], result[1])
+ msg = '%s replacements made' % result[1]
+ changed = True
+ if module._diff:
+ res_args['diff'] = {
+ 'before_header': path,
+ 'before': contents,
+ 'after_header': path,
+ 'after': result[0],
+ }
+ else:
+ msg = ''
+ changed = False
+
+ if changed and not module.check_mode:
+ if params['backup'] and os.path.exists(path):
+ res_args['backup_file'] = module.backup_local(path)
+ # We should always follow symlinks so that we change the real file
+ path = os.path.realpath(path)
+ write_changes(module, to_bytes(result[0], encoding=encoding), path)
+
+ res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/rpm_key.py b/lib/ansible/modules/rpm_key.py
new file mode 100644
index 0000000..f420eec
--- /dev/null
+++ b/lib/ansible/modules/rpm_key.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+
+# Ansible module to import third party repo keys to your rpm db
+# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rpm_key
+author:
+ - Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
+short_description: Adds or removes a gpg key from the rpm db
+description:
+ - Adds or removes (rpm --import) a gpg key to your rpm database.
+version_added: "1.3"
+options:
+ key:
+ description:
+ - Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key
+ already exists in the database.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether the key will be imported or removed from the rpm db.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ validate_certs:
+ description:
+ - If C(false) and the C(key) is a url starting with https, SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ fingerprint:
+ description:
+ - The long-form fingerprint of the key being imported.
+ - This will be used to verify the specified key.
+ type: str
+ version_added: 2.9
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: rhel
+'''
+
+EXAMPLES = '''
+- name: Import a key from a url
+ ansible.builtin.rpm_key:
+ state: present
+ key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
+
+- name: Import a key from a file
+ ansible.builtin.rpm_key:
+ state: present
+ key: /path/to/key.gpg
+
+- name: Ensure a key is not present in the db
+ ansible.builtin.rpm_key:
+ state: absent
+ key: DEADB33F
+
+- name: Verify the key, using a fingerprint, before import
+ ansible.builtin.rpm_key:
+ key: /path/to/RPM-GPG-KEY.dag.txt
+ fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
+'''
+
+RETURN = r'''#'''
+
+import re
+import os.path
+import tempfile
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_pubkey(string):
+ """Verifies if string is a pubkey"""
+ pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
+ return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
+
+
+class RpmKey(object):
+
+ def __init__(self, module):
+ # If the key is a url, we need to check if it's present to be idempotent,
+ # to do that, we need to check the keyid, which we can get from the armor.
+ keyfile = None
+ should_cleanup_keyfile = False
+ self.module = module
+ self.rpm = self.module.get_bin_path('rpm', True)
+ state = module.params['state']
+ key = module.params['key']
+ fingerprint = module.params['fingerprint']
+ if fingerprint:
+ fingerprint = fingerprint.replace(' ', '').upper()
+
+ self.gpg = self.module.get_bin_path('gpg')
+ if not self.gpg:
+ self.gpg = self.module.get_bin_path('gpg2', required=True)
+
+ if '://' in key:
+ keyfile = self.fetch_key(key)
+ keyid = self.getkeyid(keyfile)
+ should_cleanup_keyfile = True
+ elif self.is_keyid(key):
+ keyid = key
+ elif os.path.isfile(key):
+ keyfile = key
+ keyid = self.getkeyid(keyfile)
+ else:
+ self.module.fail_json(msg="Not a valid key %s" % key)
+ keyid = self.normalize_keyid(keyid)
+
+ if state == 'present':
+ if self.is_key_imported(keyid):
+ module.exit_json(changed=False)
+ else:
+ if not keyfile:
+ self.module.fail_json(msg="When importing a key, a valid file must be given")
+ if fingerprint:
+ has_fingerprint = self.getfingerprint(keyfile)
+ if fingerprint != has_fingerprint:
+ self.module.fail_json(
+ msg="The specified fingerprint, '%s', does not match the key fingerprint '%s'" % (fingerprint, has_fingerprint)
+ )
+ self.import_key(keyfile)
+ if should_cleanup_keyfile:
+ self.module.cleanup(keyfile)
+ module.exit_json(changed=True)
+ else:
+ if self.is_key_imported(keyid):
+ self.drop_key(keyid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ def fetch_key(self, url):
+ """Downloads a key from url, returns a valid path to a gpg key"""
+ rsp, info = fetch_url(self.module, url)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
+
+ key = rsp.read()
+ if not is_pubkey(key):
+ self.module.fail_json(msg="Not a public key: %s" % url)
+ tmpfd, tmpname = tempfile.mkstemp()
+ self.module.add_cleanup_file(tmpname)
+ tmpfile = os.fdopen(tmpfd, "w+b")
+ tmpfile.write(key)
+ tmpfile.close()
+ return tmpname
+
+ def normalize_keyid(self, keyid):
+ """Ensure a keyid has no leading 0x prefix and no surrounding whitespace, and that it is uppercase"""
+ ret = keyid.strip().upper()
+ # ret is already uppercased, so any '0x' prefix can only appear as '0X' here
+ if ret.startswith('0X'):
+ return ret[2:]
+ else:
+ return ret
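+
+ # Illustrative examples (assumed inputs, not part of the module logic):
+ #     normalize_keyid('0xdeadb33f')  ->  'DEADB33F'  (prefix stripped, upcased)
+ #     normalize_keyid(' DEADB33F ')  ->  'DEADB33F'  (whitespace stripped)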
+
+ def getkeyid(self, keyfile):
+ stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith('pub:'):
+ return line.split(':')[4]
+
+ self.module.fail_json(msg="Unexpected gpg output")
+
+ def getfingerprint(self, keyfile):
+ stdout, stderr = self.execute_command([
+ self.gpg, '--no-tty', '--batch', '--with-colons',
+ '--fixed-list-mode', '--with-fingerprint', keyfile
+ ])
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith('fpr:'):
+ # As mentioned here,
+ #
+ # https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS
+ #
+ # The description of the `fpr` field says
+ #
+ # "fpr :: Fingerprint (fingerprint is in field 10)"
+ #
+ return line.split(':')[9]
+
+ self.module.fail_json(msg="Unexpected gpg output")
+
+ def is_keyid(self, keystr):
+ """Verifies if a key, as provided by the user is a keyid"""
+ return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
+
+ def execute_command(self, cmd):
+ rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+ return stdout, stderr
+
+ def is_key_imported(self, keyid):
+ cmd = self.rpm + ' -q gpg-pubkey'
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0: # No key is installed on system
+ return False
+ cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
+ stdout, stderr = self.execute_command(cmd)
+ for line in stdout.splitlines():
+ if keyid in line.split(':')[4]:
+ return True
+ return False
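+
+ # The check above amounts to this shell pipeline (illustration only):
+ #
+ #     rpm -q gpg-pubkey --qf '%{description}' \
+ #         | gpg --no-tty --batch --with-colons --fixed-list-mode -
+ #
+ # Each installed gpg-pubkey package stores the armored key in its
+ # description; gpg re-parses it so the keyid (field 5 of each 'pub:'
+ # record) can be compared against the requested one.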
+
+ def import_key(self, keyfile):
+ if not self.module.check_mode:
+ self.execute_command([self.rpm, '--import', keyfile])
+
+ def drop_key(self, keyid):
+ if not self.module.check_mode:
+ self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ key=dict(type='str', required=True, no_log=False),
+ fingerprint=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ RpmKey(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/script.py b/lib/ansible/modules/script.py
new file mode 100644
index 0000000..2cefc0a
--- /dev/null
+++ b/lib/ansible/modules/script.py
@@ -0,0 +1,108 @@
+# Copyright: (c) 2012, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: script
+version_added: "0.9"
+short_description: Runs a local script on a remote node after transferring it
+description:
+ - The C(script) module takes the script name followed by a list of space-delimited arguments.
+ - Either a free-form command or the C(cmd) parameter is required; see the examples.
+ - The local script at path will be transferred to the remote node and then executed.
+ - The given script will be processed through the shell environment on the remote node.
+ - This module does not require python on the remote system, much like the M(ansible.builtin.raw) module.
+ - This module is also supported for Windows targets.
+options:
+ free_form:
+ description:
+ - Path to the local script file followed by optional arguments.
+ cmd:
+ type: str
+ description:
+ - Path to the local script to run followed by optional arguments.
+ creates:
+ description:
+ - A filename on the remote node; when it already exists, this step will B(not) be run.
+ version_added: "1.5"
+ removes:
+ description:
+ - A filename on the remote node; when it does not exist, this step will B(not) be run.
+ version_added: "1.5"
+ chdir:
+ description:
+ - Change into this directory on the remote node before running the script.
+ version_added: "2.4"
+ executable:
+ description:
+ - Name or path of an executable to invoke the script with.
+ version_added: "2.6"
+notes:
+ - It is usually preferable to write Ansible modules rather than pushing scripts. Convert your script to an Ansible module for bonus points!
+ - The C(ssh) connection plugin will force pseudo-tty allocation via C(-tt) when scripts are executed. Pseudo-ttys do not have a stderr channel and all
+ stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
+ - If the path to the local script contains spaces, it needs to be quoted.
+ - This module is also supported for Windows targets.
+seealso:
+ - module: ansible.builtin.shell
+ - module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - action_common_attributes.raw
+ - decrypt
+attributes:
+ check_mode:
+ support: partial
+ details: while the script itself is arbitrary and cannot be subject to the check mode semantics, it adds the C(creates)/C(removes) options as a workaround
+ diff_mode:
+ support: none
+ platform:
+ details: This action is one of the few that requires no Python on the remote as it passes the command directly into the connection string
+ platforms: all
+ raw:
+ support: full
+ safe_file_operations:
+ support: none
+ vault:
+ support: full
+'''
+
+EXAMPLES = r'''
+- name: Run a script with arguments (free form)
+ ansible.builtin.script: /some/local/script.sh --some-argument 1234
+
+- name: Run a script with arguments (using 'cmd' parameter)
+ ansible.builtin.script:
+ cmd: /some/local/script.sh --some-argument 1234
+
+- name: Run a script only if file.txt does not exist on the remote node
+ ansible.builtin.script: /some/local/create_file.sh --some-argument 1234
+ args:
+ creates: /the/created/file.txt
+
+- name: Run a script only if file.txt exists on the remote node
+ ansible.builtin.script: /some/local/remove_file.sh --some-argument 1234
+ args:
+ removes: /the/removed/file.txt
+
+- name: Run a script using an executable in a non-system path
+ ansible.builtin.script: /some/local/script
+ args:
+ executable: /some/remote/executable
+
+- name: Run a script using an executable in a system path
+ ansible.builtin.script: /some/local/script.py
+ args:
+ executable: python3
+
+- name: Run a PowerShell script on a Windows host
+ script: subdirectories/under/path/with/your/playbook/script.ps1
+'''
diff --git a/lib/ansible/modules/service.py b/lib/ansible/modules/service.py
new file mode 100644
index 0000000..a84829c
--- /dev/null
+++ b/lib/ansible/modules/service.py
@@ -0,0 +1,1699 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: service
+version_added: "0.1"
+short_description: Manage services
+description:
+ - Controls services on remote hosts. Supported init systems include BSD init,
+ OpenRC, SysV, Solaris SMF, systemd, upstart.
+ - This module acts as a proxy to the underlying service manager module. While all arguments will be passed to the
+ underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
+ of module arguments that all service manager modules support.
+ - This module is a proxy for multiple more specific service manager modules
+ (such as M(ansible.builtin.systemd) and M(ansible.builtin.sysvinit)).
+ This allows management of a heterogeneous environment of machines without creating a specific task for
+ each service manager. The module to be executed is determined by the I(use) option, which defaults to the
+ service manager discovered by M(ansible.builtin.setup). If C(setup) was not yet run, this module may run it.
+ - For Windows targets, use the M(ansible.windows.win_service) module instead.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - C(restarted) will always bounce the service.
+ - C(reloaded) will always reload.
+ - B(At least one of state and enabled is required.)
+ - Note that reloaded will start the service if it is not already started,
+ even if your chosen init system wouldn't normally.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ sleep:
+ description:
+ - If the service is being C(restarted) then sleep this many seconds
+ between the stop and start command.
+ - This helps to work around badly-behaving init scripts that exit immediately
+ after signaling a process to stop.
+ - Not all service managers support sleep; for example, when using systemd this setting will be ignored.
+ type: int
+ version_added: "1.3"
+ pattern:
+ description:
+ - If the service does not respond to the status command, name a
+ substring to look for in the output of the I(ps) command as a
+ stand-in for a status result.
+ - If the string is found, the service will be assumed to be started.
+ - When the remote host uses systemd, this setting will be ignored.
+ type: str
+ version_added: "0.7"
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled is required.)
+ type: bool
+ runlevel:
+ description:
+ - For OpenRC init scripts (e.g. Gentoo) only.
+ - The runlevel that this service belongs to.
+ - When the remote host uses systemd, this setting will be ignored.
+ type: str
+ default: default
+ arguments:
+ description:
+ - Additional arguments provided on the command line.
+ - When the remote host uses systemd, this setting will be ignored.
+ type: str
+ aliases: [ args ]
+ use:
+ description:
+ - The service module actually uses system-specific modules, normally chosen through auto-detection; this setting can force a specific module.
+ - Normally it uses the value of the 'ansible_service_mgr' fact and falls back to the old 'service' module when no matching one is found.
+ type: str
+ default: auto
+ version_added: "2.2"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: full
+ bypass_host_loop:
+ support: none
+ check_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ diff_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ platform:
+ details: Support depends on the availability of the specific plugin for each platform and on whether fact gathering is able to detect the service manager
+ platforms: all
+notes:
+ - For AIX, group subsystem names can be used.
+seealso:
+ - module: ansible.windows.win_service
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Start service httpd, if not started
+ ansible.builtin.service:
+ name: httpd
+ state: started
+
+- name: Stop service httpd, if started
+ ansible.builtin.service:
+ name: httpd
+ state: stopped
+
+- name: Restart service httpd, in all cases
+ ansible.builtin.service:
+ name: httpd
+ state: restarted
+
+- name: Reload service httpd, in all cases
+ ansible.builtin.service:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd, and do not touch the state
+ ansible.builtin.service:
+ name: httpd
+ enabled: yes
+
+- name: Start service foo, based on running process /usr/bin/foo
+ ansible.builtin.service:
+ name: foo
+ pattern: /usr/bin/foo
+ state: started
+
+- name: Restart network service for interface eth0
+ ansible.builtin.service:
+ name: network
+ state: restarted
+ args: eth0
+'''
+
+RETURN = r'''#'''
+
+import glob
+import json
+import os
+import platform
+import re
+import select
+import shlex
+import subprocess
+import tempfile
+import time
+
+# The distutils module is not shipped with SUNWPython on Solaris.
+# It's in the SUNWPython-devel package which also contains development files
+# that don't belong on production boxes. Since our Solaris code doesn't
+# depend on LooseVersion, do not import it on Solaris.
+if platform.system() != 'SunOS':
+ from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils.six import PY2, b
+
+
+class Service(object):
+ """
+ This is the generic Service manipulation class that is subclassed
+ based on platform.
+
+ A subclass should override the following action methods:
+ - get_service_tools
+ - service_enable
+ - get_service_status
+ - service_control
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Service)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+ self.sleep = module.params['sleep']
+ self.pattern = module.params['pattern']
+ self.enable = module.params['enabled']
+ self.runlevel = module.params['runlevel']
+ self.changed = False
+ self.running = None
+ self.crashed = None
+ self.action = None
+ self.svc_cmd = None
+ self.svc_initscript = None
+ self.svc_initctl = None
+ self.enable_cmd = None
+ self.arguments = module.params.get('arguments', '')
+ self.rcconf_file = None
+ self.rcconf_key = None
+ self.rcconf_value = None
+ self.svc_change = False
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get_service_tools(self):
+ self.module.fail_json(msg="get_service_tools not implemented on target platform")
+
+ def service_enable(self):
+ self.module.fail_json(msg="service_enable not implemented on target platform")
+
+ def get_service_status(self):
+ self.module.fail_json(msg="get_service_status not implemented on target platform")
+
+ def service_control(self):
+ self.module.fail_json(msg="service_control not implemented on target platform")
+
+ # ===========================================
+ # Generic methods that should be used on all platforms.
+
+ def execute_command(self, cmd, daemonize=False):
+
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+
+ # Most things don't need to be daemonized
+ if not daemonize:
+ # chkconfig localizes messages and we're screen scraping so make
+ # sure we use the C locale
+ return self.module.run_command(cmd, environ_update=lang_env)
+
+ # This is complex because daemonization is hard for people.
+ # What we do is daemonize a part of this module, the daemon runs the
+ # command, picks up the return code and output, and returns it to the
+ # main process.
+ pipe = os.pipe()
+ pid = os.fork()
+ if pid == 0:
+ os.close(pipe[0])
+ # Set stdin/stdout/stderr to /dev/null
+ fd = os.open(os.devnull, os.O_RDWR)
+ if fd != 0:
+ os.dup2(fd, 0)
+ if fd != 1:
+ os.dup2(fd, 1)
+ if fd != 2:
+ os.dup2(fd, 2)
+ if fd not in (0, 1, 2):
+ os.close(fd)
+
+ # Make us a daemon. Yes, that's all it takes.
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+ os.setsid()
+ os.chdir("/")
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+
+ # Start the command
+ if PY2:
+ # Python 2.6's shlex.split can't handle text strings correctly
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+ cmd = shlex.split(cmd)
+ else:
+ # Python 3's shlex.split handles text strings correctly.
+ cmd = to_text(cmd, errors='surrogate_or_strict')
+ cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
+ # In either of the above cases, pass a list of byte strings to Popen
+
+ # chkconfig localizes messages and we're screen scraping so make
+ # sure we use the C locale
+ p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=lang_env, preexec_fn=lambda: os.close(pipe[1]))
+ stdout = b("")
+ stderr = b("")
+ fds = [p.stdout, p.stderr]
+ # Wait for all output, or until the main process is dead and its output is done.
+ while fds:
+ rfd, wfd, efd = select.select(fds, [], fds, 1)
+ if not (rfd + wfd + efd) and p.poll() is not None:
+ break
+ if p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), 4096)
+ if not dat:
+ fds.remove(p.stdout)
+ stdout += dat
+ if p.stderr in rfd:
+ dat = os.read(p.stderr.fileno(), 4096)
+ if not dat:
+ fds.remove(p.stderr)
+ stderr += dat
+ p.wait()
+ # Return a JSON blob to parent
+ blob = json.dumps([p.returncode, to_text(stdout), to_text(stderr)])
+ os.write(pipe[1], to_bytes(blob, errors='surrogate_or_strict'))
+ os.close(pipe[1])
+ os._exit(0)
+ elif pid == -1:
+ self.module.fail_json(msg="unable to fork")
+ else:
+ os.close(pipe[1])
+ os.waitpid(pid, 0)
+ # Wait for data from daemon process and process it.
+ data = b("")
+ while True:
+ rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
+ if pipe[0] in rfd:
+ dat = os.read(pipe[0], 4096)
+ if not dat:
+ break
+ data += dat
+ return json.loads(to_text(data, errors='surrogate_or_strict'))
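+
+ # A minimal standalone sketch of the same daemonization protocol (an
+ # assumed simplification, illustration only): double-fork so init adopts
+ # the worker, then ship its result back to the caller over a pipe as JSON.
+ #
+ #     import json, os
+ #
+ #     def run_daemonized(fn):
+ #         r, w = os.pipe()
+ #         pid = os.fork()
+ #         if pid == 0:                 # intermediate child
+ #             os.close(r)
+ #             os.setsid()
+ #             if os.fork() > 0:        # intermediate exits; the grandchild is
+ #                 os._exit(0)          # re-parented to init (daemonized)
+ #             os.write(w, json.dumps(fn()).encode())
+ #             os.close(w)
+ #             os._exit(0)
+ #         os.close(w)                  # parent keeps only the read end
+ #         os.waitpid(pid, 0)           # reap the intermediate child
+ #         data = b''
+ #         while True:
+ #             chunk = os.read(r, 4096)
+ #             if not chunk:            # EOF once the grandchild is done
+ #                 break
+ #             data += chunk
+ #         return json.loads(data)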
+
+ def check_ps(self):
+ # Set ps flags
+ if platform.system() == 'SunOS':
+ psflags = '-ef'
+ else:
+ psflags = 'auxww'
+
+ # Find ps binary
+ psbin = self.module.get_bin_path('ps', True)
+
+ (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ self.running = False
+ lines = psout.split("\n")
+ for line in lines:
+ if self.pattern in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module.py
+ self.running = True
+ break
+
+ def check_service_changed(self):
+ if self.state and self.running is None:
+ self.module.fail_json(msg="failed determining service state, possible typo of service name?")
+ # Find out if state has changed
+ if not self.running and self.state in ["reloaded", "started"]:
+ self.svc_change = True
+ elif self.running and self.state in ["reloaded", "stopped"]:
+ self.svc_change = True
+ elif self.state == "restarted":
+ self.svc_change = True
+ if self.module.check_mode and self.svc_change:
+ self.module.exit_json(changed=True, msg='service state changed')
+
+ def modify_service_state(self):
+
+ # Only do something if state will change
+ if self.svc_change:
+ # Control service
+ if self.state in ['started']:
+ self.action = "start"
+ elif not self.running and self.state == 'reloaded':
+ self.action = "start"
+ elif self.state == 'stopped':
+ self.action = "stop"
+ elif self.state == 'reloaded':
+ self.action = "reload"
+ elif self.state == 'restarted':
+ self.action = "restart"
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='changing service state')
+
+ return self.service_control()
+
+ else:
+ # If nothing needs to change just say all is well
+ rc = 0
+ err = ''
+ out = ''
+ return rc, out, err
+
+ def service_enable_rcconf(self):
+ if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
+ self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")
+
+ self.changed = None
+ entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
+ with open(self.rcconf_file, "r") as RCFILE:
+ new_rc_conf = []
+
+ # Build a list containing the possibly modified file.
+ for rcline in RCFILE:
+ # Parse line removing whitespaces, quotes, etc.
+ rcarray = shlex.split(rcline, comments=True)
+ if len(rcarray) >= 1 and '=' in rcarray[0]:
+ (key, value) = rcarray[0].split("=", 1)
+ if key == self.rcconf_key:
+ if value.upper() == self.rcconf_value:
+ # Since the proper entry already exists we can stop iterating.
+ self.changed = False
+ break
+ else:
+ # We found the key but the value is wrong, replace with new entry.
+ rcline = entry
+ self.changed = True
+
+ # Add line to the list.
+ new_rc_conf.append(rcline.strip() + '\n')
+
+ # If we did not see any trace of our entry we need to add it.
+ if self.changed is None:
+ new_rc_conf.append(entry)
+ self.changed = True
+
+ if self.changed is True:
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ # Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
+ # This way the replacement operation is atomic.
+ rcconf_dir = os.path.dirname(self.rcconf_file)
+ rcconf_base = os.path.basename(self.rcconf_file)
+ (TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)
+
+ # Write out the contents of the list into our temporary file.
+ for rcline in new_rc_conf:
+ os.write(TMP_RCCONF, rcline.encode())
+
+ # Close temporary file.
+ os.close(TMP_RCCONF)
+
+ # Replace previous rc.conf.
+ self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
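+
+ # The write-temp-then-rename pattern above, reduced to its core (an assumed
+ # simplification, illustration only; the module itself goes through
+ # atomic_move, which also preserves ownership and permissions):
+ #
+ #     import os, tempfile
+ #
+ #     def atomic_write(path, lines):
+ #         # temp file in the target directory => same filesystem
+ #         fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
+ #         try:
+ #             for line in lines:
+ #                 os.write(fd, line.encode())
+ #         finally:
+ #             os.close(fd)
+ #         os.rename(tmp, path)  # atomic on POSIX within one filesystem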
+
+
+class LinuxService(Service):
+ """
+ This is the Linux Service manipulation class - it is currently supporting
+ a mixture of binaries and init scripts for controlling services started at
+ boot, as well as for controlling the current state.
+ """
+
+ platform = 'Linux'
+ distribution = None
+
+ def get_service_tools(self):
+
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv']
+ initpaths = ['/etc/init.d']
+ location = dict()
+
+ for binary in binaries:
+ location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
+
+ for initdir in initpaths:
+ initscript = "%s/%s" % (initdir, self.name)
+ if os.path.isfile(initscript):
+ self.svc_initscript = initscript
+
+ def check_systemd():
+
+ # tools must be installed
+ if location.get('systemctl', False):
+
+ # this should show if systemd is the boot init system
+ # these mirror systemd's own sd_booted test http://www.freedesktop.org/software/systemd/man/sd_booted.html
+ for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
+ if os.path.exists(canary):
+ return True
+
+ # If all else fails, check if init is the systemd command, using comm, as cmdline could be a symlink
+ try:
+ f = open('/proc/1/comm', 'r')
+ except IOError:
+ # If comm doesn't exist, old kernel, no systemd
+ return False
+
+ for line in f:
+ if 'systemd' in line:
+ return True
+
+ return False
+
+ # Locate a tool to enable/disable a service
+ if check_systemd():
+ # service is managed by systemd
+ self.__systemd_unit = self.name
+ self.svc_cmd = location['systemctl']
+ self.enable_cmd = location['systemctl']
+
+ elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
+ # service is managed by upstart
+ self.enable_cmd = location['initctl']
+ # set the upstart version based on the output of 'initctl version'
+ self.upstart_version = LooseVersion('0.0.0')
+ try:
+ version_re = re.compile(r'\(upstart (.*)\)')
+ rc, stdout, stderr = self.module.run_command('%s version' % location['initctl'])
+ if rc == 0:
+ res = version_re.search(stdout)
+ if res:
+ self.upstart_version = LooseVersion(res.groups()[0])
+ except Exception:
+ pass # we'll use the default of 0.0.0
+
+ self.svc_cmd = location['initctl']
+
+ elif location.get('rc-service', False):
+ # service is managed by OpenRC
+ self.svc_cmd = location['rc-service']
+ self.enable_cmd = location['rc-update']
+ return # already have service start/stop tool too!
+
+ elif self.svc_initscript:
+ # service is managed with SysV init scripts
+ if location.get('update-rc.d', False):
+ # and uses update-rc.d
+ self.enable_cmd = location['update-rc.d']
+ elif location.get('insserv', None):
+ # and uses insserv
+ self.enable_cmd = location['insserv']
+ elif location.get('chkconfig', False):
+ # and uses chkconfig
+ self.enable_cmd = location['chkconfig']
+
+ if self.enable_cmd is None:
+ fail_if_missing(self.module, False, self.name, msg='host')
+
+ # If no service control tool selected yet, try to see if 'service' is available
+ if self.svc_cmd is None and location.get('service', False):
+ self.svc_cmd = location['service']
+
+ # couldn't find anything yet
+ if self.svc_cmd is None and not self.svc_initscript:
+ self.module.fail_json(msg="cannot find 'service' binary or init script for service, possible typo in service name? Aborting.")
+
+ if location.get('initctl', False):
+ self.svc_initctl = location['initctl']
+
+ def get_systemd_service_enabled(self):
+ def sysv_exists(name):
+ script = '/etc/init.d/' + name
+ return os.access(script, os.X_OK)
+
+ def sysv_is_enabled(name):
+ return bool(glob.glob('/etc/rc?.d/S??' + name))
+
+ service_name = self.__systemd_unit
+ (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
+ if rc == 0:
+ return True
+ elif out.startswith('disabled'):
+ return False
+ elif sysv_exists(service_name):
+ return sysv_is_enabled(service_name)
+ else:
+ return False
+
+ def get_systemd_status_dict(self):
+
+ # Check status first as show will not fail if service does not exist
+ (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
+ if rc != 0:
+ self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
+ elif 'LoadState=not-found' in out:
+ self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))
+
+ key = None
+ value_buffer = []
+ status_dict = {}
+ for line in out.splitlines():
+ if '=' in line:
+ if not key:
+ key, value = line.split('=', 1)
+ # systemd fields that are shell commands can be multi-line
+ # We take a value that begins with a "{" as the start of
+ # a shell command and a line that ends with "}" as the end of
+ # the command
+ if value.lstrip().startswith('{'):
+ if value.rstrip().endswith('}'):
+ status_dict[key] = value
+ key = None
+ else:
+ value_buffer.append(value)
+ else:
+ status_dict[key] = value
+ key = None
+ else:
+ # key is set: this line continues a buffered multi-line value
+ if line.rstrip().endswith('}'):
+ status_dict[key] = '\n'.join(value_buffer)
+ key = None
+ else:
+ value_buffer.append(line)
+ else:
+ if line.rstrip().endswith('}'):
+ status_dict[key] = '\n'.join(value_buffer)
+ key = None
+ else:
+ value_buffer.append(line)
+
+ return status_dict
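+
+ # Assumed sample of 'systemctl show' output (illustration only):
+ #
+ #     ActiveState=active
+ #     ExecStart={ path=/usr/sbin/httpd ; argv[]=/usr/sbin/httpd ; ... }
+ #
+ # Single-line values land in the dict directly ('ActiveState' -> 'active');
+ # a value that opens with '{' but does not close on the same line is
+ # buffered until a line ending in '}' is seen.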
+
+ def get_systemd_service_status(self):
+ d = self.get_systemd_status_dict()
+ if d.get('ActiveState') == 'active':
+ # run-once services (for which a single successful exit indicates
+ # that they are running as designed) should not be restarted here.
+ # Thus, we are not checking d['SubState'].
+ self.running = True
+ self.crashed = False
+ elif d.get('ActiveState') == 'failed':
+ self.running = False
+ self.crashed = True
+ elif d.get('ActiveState') is None:
+ self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
+ else:
+ self.running = False
+ self.crashed = False
+ return self.running
+
+ def get_service_status(self):
+ if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
+ return self.get_systemd_service_status()
+
+ self.action = "status"
+ rc, status_stdout, status_stderr = self.service_control()
+
+ # if we have decided the service is managed by upstart, we check for some additional output...
+ if self.svc_initctl and self.running is None:
+ # check the job status by upstart response
+ initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s %s" % (self.svc_initctl, self.name, self.arguments))
+ if "stop/waiting" in initctl_status_stdout:
+ self.running = False
+ elif "start/running" in initctl_status_stdout:
+ self.running = True
+
+ if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
+ openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
+ self.running = "started" in openrc_status_stdout
+ self.crashed = "crashed" in openrc_status_stderr
+
+ # Prefer a non-zero return code. For reference, see:
+ # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+ if self.running is None and rc in [1, 2, 3, 4, 69]:
+ self.running = False
+
+ # if the job status is still not known, check it by status output keywords.
+ # Only check keywords if there's only one line of output (some init
+ # scripts will output verbosely in case of error and those can emit
+ # keywords that are picked up as false positives).
+ if self.running is None and status_stdout.count('\n') <= 1:
+ # first transform the status output that could irritate keyword matching
+ cleanout = status_stdout.lower().replace(self.name.lower(), '')
+ if "stop" in cleanout:
+ self.running = False
+ elif "run" in cleanout:
+ self.running = not ("not " in cleanout)
+ elif "start" in cleanout and "not " not in cleanout:
+ self.running = True
+ elif 'could not access pid file' in cleanout:
+ self.running = False
+ elif 'is dead and pid file exists' in cleanout:
+ self.running = False
+ elif 'dead but subsys locked' in cleanout:
+ self.running = False
+ elif 'dead but pid file exists' in cleanout:
+ self.running = False
+
+ # if the job status is still not known and we got a zero for the
+ # return code, assume here that the service is running
+ if self.running is None and rc == 0:
+ self.running = True
+
+ # if the job status is still not known, check it by special conditions
+ if self.running is None:
+ if self.name == 'iptables' and "ACCEPT" in status_stdout:
+ # iptables status command output is lame
+ # TODO: lookup if we can use a return code for this instead?
+ self.running = True
+
+ return self.running
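+
+ # For reference, the LSB status exit codes consulted above (from the
+ # refspecs link; 69 is not an LSB code - it happens to match
+ # EX_UNAVAILABLE from sysexits.h and is emitted by some init scripts):
+ #
+ #     0  program is running or service is OK
+ #     1  program is dead and /var/run pid file exists
+ #     2  program is dead and /var/lock lock file exists
+ #     3  program is not running
+ #     4  program or service status is unknown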
+
+ def service_enable(self):
+
+ if self.enable_cmd is None:
+ self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name)
+
+ self.changed = True
+ action = None
+
+ #
+ # Upstart's initctl
+ #
+ if self.enable_cmd.endswith("initctl"):
+ def write_to_override_file(file_name, file_contents):
+ override_file = open(file_name, 'w')
+ override_file.write(file_contents)
+ override_file.close()
+
+ initpath = '/etc/init'
+ if self.upstart_version >= LooseVersion('0.6.7'):
+ manreg = re.compile(r'^manual\s*$', re.M | re.I)
+ config_line = 'manual\n'
+ else:
+ manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
+ config_line = 'start on manual\n'
+ conf_file_name = "%s/%s.conf" % (initpath, self.name)
+ override_file_name = "%s/%s.override" % (initpath, self.name)
+
+ # Check to see if files contain the manual line in .conf and fail if True
+ with open(conf_file_name) as conf_file_fh:
+ conf_file_content = conf_file_fh.read()
+ if manreg.search(conf_file_content):
+ self.module.fail_json(msg="manual stanza not supported in a .conf file")
+
+ self.changed = False
+ if os.path.exists(override_file_name):
+ with open(override_file_name) as override_fh:
+ override_file_contents = override_fh.read()
+ # Remove manual stanza if present and service enabled
+ if self.enable and manreg.search(override_file_contents):
+ self.changed = True
+ override_state = manreg.sub('', override_file_contents)
+ # Add manual stanza if not present and service disabled
+ elif not (self.enable) and not (manreg.search(override_file_contents)):
+ self.changed = True
+ override_state = '\n'.join((override_file_contents, config_line))
+ # service already in desired state
+ else:
+ pass
+ # Add file with manual stanza if service disabled
+ elif not (self.enable):
+ self.changed = True
+ override_state = config_line
+ else:
+ # service already in desired state
+ pass
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ # The initctl method of enabling and disabling services is much
+ # different than for the other service methods. So actually
+ # committing the change is done in this conditional and then we
+ # skip the boilerplate at the bottom of the method
+ if self.changed:
+ try:
+ write_to_override_file(override_file_name, override_state)
+ except Exception:
+ self.module.fail_json(msg='Could not modify override file')
+
+ return
+
+ #
+ # SysV's chkconfig
+ #
+ if self.enable_cmd.endswith("chkconfig"):
+ if self.enable:
+ action = 'on'
+ else:
+ action = 'off'
+
+ (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
+ if 'chkconfig --add %s' % self.name in err:
+ self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
+ (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
+ if self.name not in out:
+ self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
+ # TODO: look back on why this is here
+ # state = out.split()[-1]
+
+ # Check if we're already in the correct state
+ if "3:%s" % action in out and "5:%s" % action in out:
+ self.changed = False
+ return
+
+ #
+ # Systemd's systemctl
+ #
+ if self.enable_cmd.endswith("systemctl"):
+ if self.enable:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # Check if we're already in the correct state
+ service_enabled = self.get_systemd_service_enabled()
+
+ # self.changed should already be true
+ if self.enable == service_enabled:
+ self.changed = False
+ return
+
+ #
+ # OpenRC's rc-update
+ #
+ if self.enable_cmd.endswith("rc-update"):
+ if self.enable:
+ action = 'add'
+ else:
+ action = 'delete'
+
+ (rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
+ for line in out.splitlines():
+ service_name, runlevels = line.split('|')
+ service_name = service_name.strip()
+ if service_name != self.name:
+ continue
+ runlevels = re.split(r'\s+', runlevels)
+ # service already enabled for the runlevel
+ if self.enable and self.runlevel in runlevels:
+ self.changed = False
+ # service already disabled for the runlevel
+ elif not self.enable and self.runlevel not in runlevels:
+ self.changed = False
+ break
+ else:
+ # service already disabled altogether
+ if not self.enable:
+ self.changed = False
+
+ if not self.changed:
+ return
+
+ #
+ # update-rc.d style
+ #
+ if self.enable_cmd.endswith("update-rc.d"):
+
+ enabled = False
+ slinks = glob.glob('/etc/rc?.d/S??' + self.name)
+ if slinks:
+ enabled = True
+
+ if self.enable != enabled:
+ self.changed = True
+
+ if self.enable:
+ action = 'enable'
+ klinks = glob.glob('/etc/rc?.d/K??' + self.name)
+ if not klinks:
+ if not self.module.check_mode:
+ (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg=err)
+ else:
+ self.module.fail_json(msg=out)
+ else:
+ action = 'disable'
+
+ if not self.module.check_mode:
+ (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg=err)
+ else:
+ self.module.fail_json(msg=out)
+ else:
+ self.changed = False
+
+ return
+
+ #
+ # insserv (Debian <=7, SLES, others)
+ #
+ if self.enable_cmd.endswith("insserv"):
+ if self.enable:
+ (rc, out, err) = self.execute_command("%s -n -v %s" % (self.enable_cmd, self.name))
+ else:
+ (rc, out, err) = self.execute_command("%s -n -r -v %s" % (self.enable_cmd, self.name))
+
+ self.changed = False
+ for line in err.splitlines():
+ if self.enable and line.find('enable service') != -1:
+ self.changed = True
+ break
+ if not self.enable and line.find('remove service') != -1:
+ self.changed = True
+ break
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ if not self.changed:
+ return
+
+ if self.enable:
+ (rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
+ if (rc != 0) or (err != ''):
+ self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+ return (rc, out, err)
+ else:
+ (rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
+ if (rc != 0) or (err != ''):
+ self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+ return (rc, out, err)
+
+ #
+ # If we've gotten to the end, the service needs to be updated
+ #
+ self.changed = True
+
+ # we change argument order depending on real binary used:
+ # rc-update and systemctl need the argument order reversed
+
+ if self.enable_cmd.endswith("rc-update"):
+ args = (self.enable_cmd, action, self.name + " " + self.runlevel)
+ elif self.enable_cmd.endswith("systemctl"):
+ args = (self.enable_cmd, action, self.__systemd_unit)
+ else:
+ args = (self.enable_cmd, self.name, action)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ (rc, out, err) = self.execute_command("%s %s %s" % args)
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
+ else:
+ self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
+
+ return (rc, out, err)
+
+ def service_control(self):
+
+ # Decide what command to run
+ svc_cmd = ''
+ arguments = self.arguments
+ if self.svc_cmd:
+ if not self.svc_cmd.endswith("systemctl"):
+ if self.svc_cmd.endswith("initctl"):
+ # initctl commands take the form <cmd> <action> <name>
+ svc_cmd = self.svc_cmd
+ arguments = "%s %s" % (self.name, arguments)
+ else:
+ # SysV and OpenRC take the form <cmd> <name> <action>
+ svc_cmd = "%s %s" % (self.svc_cmd, self.name)
+ else:
+ # systemd commands take the form <cmd> <action> <name>
+ svc_cmd = self.svc_cmd
+ arguments = "%s %s" % (self.__systemd_unit, arguments)
+ elif self.svc_cmd is None and self.svc_initscript:
+ # upstart
+ svc_cmd = "%s" % self.svc_initscript
+
+ # In OpenRC, if a service crashed, we need to reset its status to
+ # stopped with the zap command, before we can start it again.
+ if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
+ self.execute_command("%s zap" % svc_cmd, daemonize=True)
+
+ if self.action != "restart":
+ if svc_cmd != '':
+ # upstart or systemd or OpenRC
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
+ else:
+ # SysV
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
+ elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
+ # All services in OpenRC support restart.
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
+ else:
+ # In other systems, not all services support restart. Do it the hard way.
+ if svc_cmd != '':
+ # upstart or systemd
+ rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
+ else:
+ # SysV
+ rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
+
+ if self.sleep:
+ time.sleep(self.sleep)
+
+ if svc_cmd != '':
+ # upstart or systemd
+ rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
+ else:
+ # SysV
+ rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
+
+ # merge return information
+ if rc1 != 0 and rc2 == 0:
+ rc_state = rc2
+ stdout = stdout2
+ stderr = stderr2
+ else:
+ rc_state = rc1 + rc2
+ stdout = stdout1 + stdout2
+ stderr = stderr1 + stderr2
+
+ return (rc_state, stdout, stderr)
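+
+ # Summary of the command shapes produced above, by init system
+ # (illustration only):
+ #
+ #     systemd:   systemctl <action> <unit> [args]
+ #     upstart:   initctl <action> <name> [args]
+ #     SysV:      service <name> <action> [args]
+ #     OpenRC:    rc-service <name> <action> [args]
+ #     init.d:    /etc/init.d/<name> <action> [args]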
+
+
+class FreeBsdService(Service):
+ """
+ This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot and the 'service' binary to
+ check status and perform direct service manipulation.
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ self.svc_cmd = self.module.get_bin_path('service', True)
+ if not self.svc_cmd:
+ self.module.fail_json(msg='unable to find service binary')
+
+ self.sysrc_cmd = self.module.get_bin_path('sysrc')
+
+ def get_service_status(self):
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments))
+ if self.name == "pf":
+ self.running = "Enabled" in stdout
+ else:
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf', '/etc/rc.conf.local', '/usr/local/etc/rc.conf']
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
+ rcvars = None
+ try:
+ rcvars = shlex.split(stdout, comments=True)
+ except Exception:
+ # TODO: add a warning to the output with the failure; without the
+ # guard above, rcvars would be unbound when shlex.split() raises
+ pass
+
+ if not rcvars:
+ self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
+
+ # In rare cases, e.g. sendmail, rcvar can return several key=value pairs.
+ # Usually there is just one, however. In other rare cases, e.g. uwsgi,
+ # rcvar can return extra uncommented data that is not at all related to
+ # the rcvar. We will just take the first key=value pair we come across
+ # and hope for the best.
+ for rcvar in rcvars:
+ if '=' in rcvar:
+ self.rcconf_key, default_rcconf_value = rcvar.split('=', 1)
+ break
+
+ if self.rcconf_key is None:
+ self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
+
+ if self.sysrc_cmd: # FreeBSD >= 9.2
+
+ rc, current_rcconf_value, stderr = self.execute_command("%s -n %s" % (self.sysrc_cmd, self.rcconf_key))
+ # it can happen that rcvar is not set (case of a system coming from the ports collection)
+ # so we will fallback on the default
+ if rc != 0:
+ current_rcconf_value = default_rcconf_value
+
+ if current_rcconf_value.strip().upper() != self.rcconf_value:
+
+ self.changed = True
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ rc, change_stdout, change_stderr = self.execute_command("%s %s=\"%s\"" % (self.sysrc_cmd, self.rcconf_key, self.rcconf_value))
+ if rc != 0:
+ self.module.fail_json(msg="unable to set rcvar using sysrc", stdout=change_stdout, stderr=change_stderr)
+
+ # sysrc does not exit with code 1 on permission error => validate successful change using service(8)
+ rc, check_stdout, check_stderr = self.execute_command("%s %s %s" % (self.svc_cmd, self.name, "enabled"))
+ if self.enable != (rc == 0): # rc = 0 indicates enabled service, rc = 1 indicates disabled service
+ self.module.fail_json(msg="unable to set rcvar: sysrc did not change value", stdout=change_stdout, stderr=change_stderr)
+
+ else:
+ self.changed = False
+
+ else: # Legacy (FreeBSD < 9.2)
+ try:
+ return self.service_enable_rcconf()
+ except Exception:
+ self.module.fail_json(msg='unable to set rcvar')
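+
+ # Assumed sample of what 'service sshd rcvar' prints on FreeBSD
+ # (illustration only):
+ #
+ #     # sshd
+ #     #
+ #     sshd_enable="YES"
+ #     #   (default: "")
+ #
+ # shlex.split(stdout, comments=True) drops the commented lines and the
+ # quoting, leaving 'sshd_enable=YES' as the key=value pair to split on.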
+
+ def service_control(self):
+
+ if self.action == "start":
+ self.action = "onestart"
+ if self.action == "stop":
+ self.action = "onestop"
+ if self.action == "reload":
+ self.action = "onereload"
+
+ ret = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))
+
+ if self.sleep:
+ time.sleep(self.sleep)
+
+ return ret
+
+
+class DragonFlyBsdService(FreeBsdService):
+ """
+ This is the DragonFly BSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot and the 'service' binary to
+ check status and perform direct service manipulation.
+ """
+
+ platform = 'DragonFly'
+ distribution = None
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf'] # Overkill?
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ self.rcconf_key = "%s" % self.name.replace("-", "_")
+
+ return self.service_enable_rcconf()
+
+
+class OpenBsdService(Service):
+ """
+ This is the OpenBSD Service manipulation class - it uses rcctl(8) or
+ /etc/rc.d scripts for service control. Enabling a service is
+ only supported if rcctl is present.
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ self.enable_cmd = self.module.get_bin_path('rcctl')
+
+ if self.enable_cmd:
+ self.svc_cmd = self.enable_cmd
+ else:
+ rcdir = '/etc/rc.d'
+
+ rc_script = "%s/%s" % (rcdir, self.name)
+ if os.path.isfile(rc_script):
+ self.svc_cmd = rc_script
+
+ if not self.svc_cmd:
+ self.module.fail_json(msg='unable to find svc_cmd')
+
+ def get_service_status(self):
+ if self.enable_cmd:
+ rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
+ else:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_control(self):
+ if self.enable_cmd:
+ return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name), daemonize=True)
+ else:
+ return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
+
+ def service_enable(self):
+ if not self.enable_cmd:
+ return super(OpenBsdService, self).service_enable()
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ getdef_string = stdout.rstrip()
+
+ # Depending on the service the string returned from 'getdef' may be
+ # either a set of flags or the boolean YES/NO
+ if getdef_string == "YES" or getdef_string == "NO":
+ default_flags = ''
+ else:
+ default_flags = getdef_string
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ get_string = stdout.rstrip()
+
+ # Depending on the service the string returned from 'get' may be
+ # either a set of flags or the boolean YES/NO
+ if get_string == "YES" or get_string == "NO":
+ current_flags = ''
+ else:
+ current_flags = get_string
+
+ # If there are arguments from the user we use these as flags unless
+ # they are already set.
+ if self.arguments and self.arguments != current_flags:
+ changed_flags = self.arguments
+ # If the user has not supplied any arguments and the current flags
+ # differ from the default we reset them.
+ elif not self.arguments and current_flags != default_flags:
+ changed_flags = ' '
+ # Otherwise there is no need to modify flags.
+ else:
+ changed_flags = ''
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
+
+ if self.enable:
+ if rc == 0 and not changed_flags:
+ return
+
+ if rc != 0:
+ status_action = "set %s status on" % (self.name)
+ else:
+ status_action = ''
+ if changed_flags:
+ flags_action = "set %s flags %s" % (self.name, changed_flags)
+ else:
+ flags_action = ''
+ else:
+ if rc == 1:
+ return
+
+ status_action = "set %s status off" % self.name
+ flags_action = ''
+
+ # Verify state assumption
+ if not status_action and not flags_action:
+ self.module.fail_json(msg="neither status_action or status_flags is set, this should never happen")
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ status_modified = 0
+ if status_action:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg="rcctl failed to modify service status")
+
+ status_modified = 1
+
+ if flags_action:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action))
+
+ if rc != 0:
+ if stderr:
+ if status_modified:
+ error_message = "rcctl modified service status but failed to set flags: " + stderr
+ else:
+ error_message = stderr
+ else:
+ if status_modified:
+ error_message = "rcctl modified service status but failed to set flags"
+ else:
+ error_message = "rcctl failed to modify service flags"
+
+ self.module.fail_json(msg=error_message)
+
+ self.changed = True
+
+
+class NetBsdService(Service):
+ """
+ This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot, check status and perform
+ direct service manipulation. Init scripts in /etc/rc.d are used for
+ controlling services (start/stop) as well as for controlling the current
+ state.
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ initpaths = ['/etc/rc.d'] # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories'
+
+ for initdir in initpaths:
+ initscript = "%s/%s" % (initdir, self.name)
+ if os.path.isfile(initscript):
+ self.svc_initscript = initscript
+
+ if not self.svc_initscript:
+ self.module.fail_json(msg='unable to find rc.d script')
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf'] # Overkill?
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ self.rcconf_key = "%s" % self.name.replace("-", "_")
+
+ return self.service_enable_rcconf()
+
+ def get_service_status(self):
+ self.svc_cmd = "%s" % self.svc_initscript
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus'))
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_control(self):
+ if self.action == "start":
+ self.action = "onestart"
+ if self.action == "stop":
+ self.action = "onestop"
+
+ self.svc_cmd = "%s" % self.svc_initscript
+ return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True)
+
+
+class SunOSService(Service):
+ """
+ This is the SunOS Service manipulation class - it uses the svcadm
+ command for controlling services, and svcs command for checking status.
+ It also tries to be smart about taking the service out of maintenance
+ state if necessary.
+ """
+ platform = 'SunOS'
+ distribution = None
+
+ def get_service_tools(self):
+ self.svcs_cmd = self.module.get_bin_path('svcs', True)
+
+ if not self.svcs_cmd:
+ self.module.fail_json(msg='unable to find svcs binary')
+
+ self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
+
+ if not self.svcadm_cmd:
+ self.module.fail_json(msg='unable to find svcadm binary')
+
+ if self.svcadm_supports_sync():
+ self.svcadm_sync = '-s'
+ else:
+ self.svcadm_sync = ''
+
+ def svcadm_supports_sync(self):
+ # Support for synchronous restart/refresh is only supported on
+ # Oracle Solaris >= 11.2
+ for line in open('/etc/release', 'r').readlines():
+ m = re.match(r'\s+Oracle Solaris (\d+)\.(\d+).*', line.rstrip())
+ if m and m.groups() >= ('11', '2'):
+ return True
+
+ def get_service_status(self):
+ status = self.get_sunos_svcs_status()
+ # Only 'online' is considered properly running. Everything else is off
+ # or has some sort of problem.
+ if status == 'online':
+ self.running = True
+ else:
+ self.running = False
+
+ def get_sunos_svcs_status(self):
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ lines = stdout.rstrip("\n").split("\n")
+ status = lines[-1].split(" ")[0]
+ # status is one of: online, offline, degraded, disabled, maintenance, uninitialized
+ # see man svcs(1)
+ return status
+
+ def service_enable(self):
+ # Get current service enablement status
+ rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ enabled = False
+ temporary = False
+
+ # look for enabled line, which could be one of:
+ # enabled true (temporary)
+ # enabled false (temporary)
+ # enabled true
+ # enabled false
+ for line in stdout.split("\n"):
+ if line.startswith("enabled"):
+ if "true" in line:
+ enabled = True
+ if "temporary" in line:
+ temporary = True
+
+ startup_enabled = (enabled and not temporary) or (not enabled and temporary)
+
+ if self.enable and startup_enabled:
+ return
+ elif (not self.enable) and (not startup_enabled):
+ return
+
+ if not self.module.check_mode:
+ # Mark service as started or stopped (this will have the side effect of
+ # actually stopping or starting the service)
+ if self.enable:
+ subcmd = "enable -rs"
+ else:
+ subcmd = "disable -s"
+
+ rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ self.changed = True
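+
+ # Truth table for the persistent-enablement test above (illustration only):
+ #
+ #     enabled  temporary  ->  startup_enabled
+ #     True     False      ->  True    (enabled, and it survives a reboot)
+ #     True     True       ->  False   (enabled only until the next reboot)
+ #     False    False      ->  False
+ #     False    True       ->  True    (disabled only until the next reboot)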
+
+ def service_control(self):
+ status = self.get_sunos_svcs_status()
+
+ # if starting or reloading, clear maintenance states
+ if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
+ rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
+ if rc != 0:
+ return rc, stdout, stderr
+ status = self.get_sunos_svcs_status()
+
+ if status in ['maintenance', 'degraded']:
+ self.module.fail_json(msg="Failed to bring service out of %s status." % status)
+
+ if self.action == 'start':
+ subcmd = "enable -rst"
+ elif self.action == 'stop':
+ subcmd = "disable -st"
+ elif self.action == 'reload':
+ subcmd = "refresh %s" % (self.svcadm_sync)
+ elif self.action == 'restart' and status == 'online':
+ subcmd = "restart %s" % (self.svcadm_sync)
+ elif self.action == 'restart' and status != 'online':
+ subcmd = "enable -rst"
+
+ return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
+
+
+class AIX(Service):
+ """
+ This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc
+ and refresh for service control. Enabling a service is currently not supported.
+ Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab
+ commands)
+ """
+
+ platform = 'AIX'
+ distribution = None
+
+ def get_service_tools(self):
+ self.lssrc_cmd = self.module.get_bin_path('lssrc', True)
+
+ if not self.lssrc_cmd:
+ self.module.fail_json(msg='unable to find lssrc binary')
+
+ self.startsrc_cmd = self.module.get_bin_path('startsrc', True)
+
+ if not self.startsrc_cmd:
+ self.module.fail_json(msg='unable to find startsrc binary')
+
+ self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True)
+
+ if not self.stopsrc_cmd:
+ self.module.fail_json(msg='unable to find stopsrc binary')
+
+ self.refresh_cmd = self.module.get_bin_path('refresh', True)
+
+ if not self.refresh_cmd:
+ self.module.fail_json(msg='unable to find refresh binary')
+
+ def get_service_status(self):
+ status = self.get_aix_src_status()
+ # Only 'active' is considered properly running. Everything else is off
+ # or has some sort of problem.
+ if status == 'active':
+ self.running = True
+ else:
+ self.running = False
+
+ def get_aix_src_status(self):
+ # Check subsystem status
+ rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
+ if rc == 1:
+ # If check for subsystem is not ok, check if service name is a
+ # group subsystem
+ rc, stdout, stderr = self.execute_command("%s -g %s" % (self.lssrc_cmd, self.name))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+ else:
+ # Check the status of all subsystems; if one subsystem is not active,
+ # the group is considered not active.
+ lines = stdout.splitlines()
+ for state in lines[1:]:
+ if state.split()[-1].strip() != "active":
+ status = state.split()[-1].strip()
+ break
+ else:
+ status = "active"
+
+ # status is one of: active, inoperative
+ return status
+ else:
+ lines = stdout.rstrip("\n").split("\n")
+ status = lines[-1].split(" ")[-1]
+
+ # status is one of: active, inoperative
+ return status
+
+ def service_control(self):
+
+ # Check if service name is a subsystem of a group subsystem
+ rc, stdout, stderr = self.execute_command("%s -a" % (self.lssrc_cmd))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+ else:
+ lines = stdout.splitlines()
+ subsystems = []
+ groups = []
+ for line in lines[1:]:
+ subsystem = line.split()[0].strip()
+ group = line.split()[1].strip()
+ subsystems.append(subsystem)
+ if group:
+ groups.append(group)
+
+        # Determine whether the service name refers to a subsystem (-s)
+        # or to a group of subsystems (-g)
+ if self.name in subsystems:
+ srccmd_parameter = "-s"
+ elif self.name in groups:
+ srccmd_parameter = "-g"
+
+ if self.action == 'start':
+ srccmd = self.startsrc_cmd
+ elif self.action == 'stop':
+ srccmd = self.stopsrc_cmd
+ elif self.action == 'reload':
+ srccmd = self.refresh_cmd
+ elif self.action == 'restart':
+ self.execute_command("%s %s %s" % (self.stopsrc_cmd, srccmd_parameter, self.name))
+ if self.sleep:
+ time.sleep(self.sleep)
+ srccmd = self.startsrc_cmd
+
+ if self.arguments and self.action in ('start', 'restart'):
+ return self.execute_command("%s -a \"%s\" %s %s" % (srccmd, self.arguments, srccmd_parameter, self.name))
+ else:
+ return self.execute_command("%s %s %s" % (srccmd, srccmd_parameter, self.name))
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reloaded', 'restarted']),
+ sleep=dict(type='int'),
+ pattern=dict(type='str'),
+ enabled=dict(type='bool'),
+ runlevel=dict(type='str', default='default'),
+ arguments=dict(type='str', default='', aliases=['args']),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ service = Service(module)
+
+ module.debug('Service instantiated - platform %s' % service.platform)
+ if service.distribution:
+ module.debug('Service instantiated - distribution %s' % service.distribution)
+
+ rc = 0
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = service.name
+
+ # Find service management tools
+ service.get_service_tools()
+
+ # Enable/disable service startup at boot if requested
+ if service.module.params['enabled'] is not None:
+ # FIXME: ideally this should detect if we need to toggle the enablement state, though
+ # it's unlikely the changed handler would need to fire in this case so it's a minor thing.
+ service.service_enable()
+ result['enabled'] = service.enable
+
+ if module.params['state'] is None:
+ # Not changing the running state, so bail out now.
+ result['changed'] = service.changed
+ module.exit_json(**result)
+
+ result['state'] = service.state
+
+ # Collect service status
+ if service.pattern:
+ service.check_ps()
+ else:
+ service.get_service_status()
+
+ # Calculate if request will change service state
+ service.check_service_changed()
+
+ # Modify service state if necessary
+ (rc, out, err) = service.modify_service_state()
+
+ if rc != 0:
+ if err and "Job is already running" in err:
+            # upstart got confused; one known case is MySQL on Ubuntu 12.04,
+            # where status may report that it has no start/stop links and we
+            # could not get an accurate status
+ pass
+ else:
+ if err:
+ module.fail_json(msg=err)
+ else:
+ module.fail_json(msg=out)
+
+ result['changed'] = service.changed | service.svc_change
+ if service.module.params['enabled'] is not None:
+ result['enabled'] = service.module.params['enabled']
+
+ if not service.module.params['state']:
+ status = service.get_service_status()
+ if status is None:
+ result['state'] = 'absent'
+ elif status is False:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+ else:
+        # as we may have just bounced the service, the service command may not
+        # report an accurate state at this moment, so just show what we ran
+ if service.module.params['state'] in ['reloaded', 'restarted', 'started']:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/service_facts.py b/lib/ansible/modules/service_facts.py
new file mode 100644
index 0000000..d2fbfad
--- /dev/null
+++ b/lib/ansible/modules/service_facts.py
@@ -0,0 +1,411 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# originally copied from AWX's scan_services module to bring this functionality
+# into Core
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: service_facts
+short_description: Return service state information as fact data
+description:
+ - Return service state information as fact data for various service management utilities.
+version_added: "2.5"
+requirements: ["Any of the following supported init systems: systemd, sysv, upstart, openrc, AIX SRC"]
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix
+notes:
+  - When accessing the C(ansible_facts.services) facts collected by this module,
+    it is recommended to not use "dot notation" because services can have a C(-)
+    character in their name, which would result in invalid "dot notation", such as
+    C(ansible_facts.services.zuul-gateway). It is instead recommended to use
+    the string value of the service name as the key in order to obtain
+    the fact data value, like C(ansible_facts.services['zuul-gateway']).
+ - AIX SRC was added in version 2.11.
+author:
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = r'''
+- name: Populate service facts
+ ansible.builtin.service_facts:
+
+- name: Print service facts
+ ansible.builtin.debug:
+ var: ansible_facts.services
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Facts to add to ansible_facts about the services on the system
+ returned: always
+ type: complex
+ contains:
+ services:
+ description: States of the services with service name as key.
+ returned: always
+ type: complex
+ contains:
+ source:
+ description:
+ - Init system of the service.
+ - One of C(rcctl), C(systemd), C(sysv), C(upstart), C(src).
+ returned: always
+ type: str
+ sample: sysv
+ state:
+ description:
+ - State of the service.
+ - 'This commonly includes (but is not limited to) the following: C(failed), C(running), C(stopped) or C(unknown).'
+ - Depending on the used init system additional states might be returned.
+ returned: always
+ type: str
+ sample: running
+ status:
+ description:
+ - State of the service.
+ - Either C(enabled), C(disabled), C(static), C(indirect) or C(unknown).
+ returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart or OpenBSD
+ type: str
+ sample: enabled
+ name:
+ description: Name of the service.
+ returned: always
+ type: str
+ sample: arp-ethers.service
+'''
+
+
+import os
+import platform
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+
+
+class BaseService(object):
+
+ def __init__(self, module):
+ self.module = module
+
+
+class ServiceScanService(BaseService):
+
+ def _list_sysvinit(self, services):
+ rc, stdout, stderr = self.module.run_command("%s --status-all" % self.service_path)
+ if rc == 4 and not os.path.exists('/etc/init.d'):
+            # This function is not intended to run on Red Hat, but it could happen
+            # if `chkconfig` is not installed. `service` on RHEL9 returns rc 4
+            # when /etc/init.d is missing, so check for /etc/init.d as an extra
+            # guard instead of relying solely on rc == 4
+ return
+ if rc != 0:
+ self.module.warn("Unable to query 'service' tool (%s): %s" % (rc, stderr))
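+        # typical `service --status-all` lines look like
+        # " [ + ]  apache2" or " [ - ]  ssh"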
+ p = re.compile(r'^\s*\[ (?P<state>\+|\-) \]\s+(?P<name>.+)$', flags=re.M)
+ for match in p.finditer(stdout):
+ service_name = match.group('name')
+ if match.group('state') == "+":
+ service_state = "running"
+ else:
+ service_state = "stopped"
+ services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
+
+ def _list_upstart(self, services):
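+        # typical `initctl list` lines look like
+        # "mysql start/running, process 1234" or "rc stop/waiting"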
+ p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
+ rc, stdout, stderr = self.module.run_command("%s list" % self.initctl_path)
+ if rc != 0:
+ self.module.warn('Unable to query upstart for service data: %s' % stderr)
+ else:
+ real_stdout = stdout.replace("\r", "")
+ for line in real_stdout.split("\n"):
+ m = p.match(line)
+ if not m:
+ continue
+ service_name = m.group('name')
+ service_goal = m.group('goal')
+ service_state = m.group('state')
+ if m.group('pid'):
+ pid = m.group('pid')
+ else:
+ pid = None # NOQA
+ payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
+ services[service_name] = payload
+
+ def _list_rh(self, services):
+
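+        # typical `chkconfig` line:
+        # "httpd   0:off  1:off  2:on  3:on  4:on  5:on  6:off"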
+ p = re.compile(
+ r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
+ r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
+ rc, stdout, stderr = self.module.run_command('%s' % self.chkconfig_path, use_unsafe_shell=True)
+ # Check for special cases where stdout does not fit pattern
+ match_any = False
+ for line in stdout.split('\n'):
+ if p.match(line):
+ match_any = True
+ if not match_any:
+ p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
+ match_any = False
+ for line in stdout.split('\n'):
+ if p_simple.match(line):
+ match_any = True
+ if match_any:
+ # Try extra flags " -l --allservices" needed for SLES11
+ rc, stdout, stderr = self.module.run_command('%s -l --allservices' % self.chkconfig_path, use_unsafe_shell=True)
+ elif '--list' in stderr:
+ # Extra flag needed for RHEL5
+ rc, stdout, stderr = self.module.run_command('%s --list' % self.chkconfig_path, use_unsafe_shell=True)
+
+ for line in stdout.split('\n'):
+ m = p.match(line)
+ if m:
+ service_name = m.group('service')
+ service_state = 'stopped'
+ service_status = "disabled"
+ if m.group('rl3') == 'on':
+ service_status = "enabled"
+ rc, stdout, stderr = self.module.run_command('%s %s status' % (self.service_path, service_name), use_unsafe_shell=True)
+ service_state = rc
+ if rc in (0,):
+ service_state = 'running'
+ # elif rc in (1,3):
+ else:
+ output = stderr.lower()
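+                    # for/else: warn when stderr hints at a permissions problem;
+                    # if no hint is found (no break), assume the service is stopped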
+ for x in ('root', 'permission', 'not in sudoers'):
+ if x in output:
+ self.module.warn('Insufficient permissions to query sysV service "%s" and their states' % service_name)
+ break
+ else:
+ service_state = 'stopped'
+
+ service_data = {"name": service_name, "state": service_state, "status": service_status, "source": "sysv"}
+ services[service_name] = service_data
+
+ def _list_openrc(self, services):
+ all_services_runlevels = {}
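+        # after the grep/tr filters below, `rc-status` lines look roughly like
+        # " sshd   started" and `rc-update show -v` lines like " sshd | default"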
+ rc, stdout, stderr = self.module.run_command("%s -a -s -m 2>&1 | grep '^ ' | tr -d '[]'" % self.rc_status_path, use_unsafe_shell=True)
+ rc_u, stdout_u, stderr_u = self.module.run_command("%s show -v 2>&1 | grep '|'" % self.rc_update_path, use_unsafe_shell=True)
+ for line in stdout_u.split('\n'):
+ line_data = line.split('|')
+ if len(line_data) < 2:
+ continue
+ service_name = line_data[0].strip()
+ runlevels = line_data[1].strip()
+ if not runlevels:
+ all_services_runlevels[service_name] = None
+ else:
+ all_services_runlevels[service_name] = runlevels.split()
+ for line in stdout.split('\n'):
+ line_data = line.split()
+ if len(line_data) < 2:
+ continue
+ service_name = line_data[0]
+ service_state = line_data[1]
+ service_runlevels = all_services_runlevels[service_name]
+ service_data = {"name": service_name, "runlevels": service_runlevels, "state": service_state, "source": "openrc"}
+ services[service_name] = service_data
+
+ def gather_services(self):
+ services = {}
+
+ # find cli tools if available
+ self.service_path = self.module.get_bin_path("service")
+ self.chkconfig_path = self.module.get_bin_path("chkconfig")
+ self.initctl_path = self.module.get_bin_path("initctl")
+ self.rc_status_path = self.module.get_bin_path("rc-status")
+ self.rc_update_path = self.module.get_bin_path("rc-update")
+
+ # TODO: review conditionals ... they should not be this 'exclusive'
+ if self.service_path and self.chkconfig_path is None and self.rc_status_path is None:
+ self._list_sysvinit(services)
+ if self.initctl_path and self.chkconfig_path is None:
+ self._list_upstart(services)
+ elif self.chkconfig_path:
+ self._list_rh(services)
+ elif self.rc_status_path is not None and self.rc_update_path is not None:
+ self._list_openrc(services)
+ return services
+
+
+class SystemctlScanService(BaseService):
+
+ BAD_STATES = frozenset(['not-found', 'masked', 'failed'])
+
+ def systemd_enabled(self):
+        # Check whether init is systemd; read /proc/1/comm because the cmdline
+        # may only show a path such as /sbin/init, which could be a symlink to systemd
+ try:
+ f = open('/proc/1/comm', 'r')
+ except IOError:
+ # If comm doesn't exist, old kernel, no systemd
+ return False
+ for line in f:
+ if 'systemd' in line:
+ return True
+ return False
+
+ def _list_from_units(self, systemctl_path, services):
+
+ # list units as systemd sees them
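+        # typical line: "cron.service loaded active running Regular background program processing daemon";
+        # failed/not-found units carry a leading dot marker as the first field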
+ rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.warn("Could not list units from systemd: %s" % stderr)
+ else:
+ for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line]:
+
+ state_val = "stopped"
+ status_val = "unknown"
+ fields = line.split()
+ for bad in self.BAD_STATES:
+                    if bad in fields:  # the leading dot marker occupies field 0
+ status_val = bad
+ fields = fields[1:]
+ break
+ else:
+ # active/inactive
+ status_val = fields[2]
+
+                # the fields array is normalized now, so indexing is predictable
+ service_name = fields[0]
+ if fields[3] == "running":
+ state_val = "running"
+
+ services[service_name] = {"name": service_name, "state": state_val, "status": status_val, "source": "systemd"}
+
+ def _list_from_unit_files(self, systemctl_path, services):
+
+ # now try unit files for complete picture and final 'status'
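+        # typical line: "cron.service   enabled"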
+ rc, stdout, stderr = self.module.run_command("%s list-unit-files --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.warn("Could not get unit files data from systemd: %s" % stderr)
+ else:
+ for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line]:
+ # there is one more column (VENDOR PRESET) from `systemctl list-unit-files` for systemd >= 245
+ try:
+ service_name, status_val = line.split()[:2]
+                except ValueError:
+                    # a line with fewer than two fields raises ValueError on unpacking
+ self.module.fail_json(msg="Malformed output discovered from systemd list-unit-files: {0}".format(line))
+ if service_name not in services:
+ rc, stdout, stderr = self.module.run_command("%s show %s --property=ActiveState" % (systemctl_path, service_name), use_unsafe_shell=True)
+ state = 'unknown'
+ if not rc and stdout != '':
+ state = stdout.replace('ActiveState=', '').rstrip()
+ services[service_name] = {"name": service_name, "state": state, "status": status_val, "source": "systemd"}
+ elif services[service_name]["status"] not in self.BAD_STATES:
+ services[service_name]["status"] = status_val
+
+ def gather_services(self):
+
+ services = {}
+ if self.systemd_enabled():
+ systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
+ if systemctl_path:
+ self._list_from_units(systemctl_path, services)
+ self._list_from_unit_files(systemctl_path, services)
+
+ return services
+
+
+class AIXScanService(BaseService):
+
+ def gather_services(self):
+
+ services = {}
+ if platform.system() == 'AIX':
+ lssrc_path = self.module.get_bin_path("lssrc")
+ if lssrc_path:
+ rc, stdout, stderr = self.module.run_command("%s -a" % lssrc_path)
+ if rc != 0:
+ self.module.warn("lssrc could not retrieve service data (%s): %s" % (rc, stderr))
+ else:
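+                    # `lssrc -a` columns are: Subsystem, Group, PID, Status,
+                    # e.g. "sendmail   mail   123456   active"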
+ for line in stdout.split('\n'):
+ line_data = line.split()
+ if len(line_data) < 2:
+ continue # Skipping because we expected more data
+ if line_data[0] == "Subsystem":
+ continue # Skip header
+ service_name = line_data[0]
+ if line_data[-1] == "active":
+ service_state = "running"
+ elif line_data[-1] == "inoperative":
+ service_state = "stopped"
+ else:
+ service_state = "unknown"
+ services[service_name] = {"name": service_name, "state": service_state, "source": "src"}
+ return services
+
+
+class OpenBSDScanService(BaseService):
+
+ def query_rcctl(self, cmd):
+ svcs = []
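+        # `rcctl ls <cmd>` prints one service name per line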
+ rc, stdout, stderr = self.module.run_command("%s ls %s" % (self.rcctl_path, cmd))
+ if 'needs root privileges' in stderr.lower():
+ self.module.warn('rcctl requires root privileges')
+ else:
+ for svc in stdout.split('\n'):
+ if svc == '':
+ continue
+ else:
+ svcs.append(svc)
+ return svcs
+
+ def gather_services(self):
+
+ services = {}
+ self.rcctl_path = self.module.get_bin_path("rcctl")
+ if self.rcctl_path:
+
+ for svc in self.query_rcctl('all'):
+ services[svc] = {'name': svc, 'source': 'rcctl'}
+
+ for svc in self.query_rcctl('on'):
+ services[svc].update({'status': 'enabled'})
+
+ for svc in self.query_rcctl('started'):
+ services[svc].update({'state': 'running'})
+
+ # Based on the list of services that are enabled, determine which are disabled
+ [services[svc].update({'status': 'disabled'}) for svc in services if services[svc].get('status') is None]
+
+        # and do the same for those that aren't running
+ [services[svc].update({'state': 'stopped'}) for svc in services if services[svc].get('state') is None]
+
+ # Override the state for services which are marked as 'failed'
+ for svc in self.query_rcctl('failed'):
+ services[svc].update({'state': 'failed'})
+
+ return services
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
+ locale = get_best_parsable_locale(module)
+ module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale)
+ service_modules = (ServiceScanService, SystemctlScanService, AIXScanService, OpenBSDScanService)
+ all_services = {}
+ for svc_module in service_modules:
+ svcmod = svc_module(module)
+ svc = svcmod.gather_services()
+ if svc:
+ all_services.update(svc)
+ if len(all_services) == 0:
+ results = dict(skipped=True, msg="Failed to find any services. This can be due to privileges or some other configuration issue.")
+ else:
+ results = dict(ansible_facts=dict(services=all_services))
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/set_fact.py b/lib/ansible/modules/set_fact.py
new file mode 100644
index 0000000..5cb1f7d
--- /dev/null
+++ b/lib/ansible/modules/set_fact.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: set_fact
+short_description: Set host variable(s) and fact(s).
+version_added: "1.2"
+description:
+ - This action allows setting variables associated to the current host.
+ - These variables will be available to subsequent plays during an ansible-playbook run via the host they were set on.
+ - Set C(cacheable) to C(true) to save variables across executions using a fact cache.
+    Variables will keep the set_fact precedence for the current run, but will use 'cached fact' precedence for subsequent ones.
+ - Per the standard Ansible variable precedence rules, other types of variables have a higher priority, so this value may be overridden.
+options:
+ key_value:
+ description:
+ - "The C(set_fact) module takes C(key=value) pairs or C(key: value) (YAML notation) as variables to set in the playbook scope.
+ The 'key' is the resulting variable name and the value is, of course, the value of said variable."
+ - You can create multiple variables at once, by supplying multiple pairs, but do NOT mix notations.
+ required: true
+ cacheable:
+ description:
+ - This boolean converts the variable into an actual 'fact' which will also be added to the fact cache.
+ It does not enable fact caching across runs, it just means it will work with it if already enabled.
+      - Normally this module creates 'host level variables', which have much higher precedence; this option changes the nature and precedence
+ (by 7 steps) of the variable created.
+ U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable)
+ - "This actually creates 2 copies of the variable, a normal 'set_fact' host variable with high precedence and
+ a lower 'ansible_fact' one that is available for persistence via the facts cache plugin.
+ This creates a possibly confusing interaction with C(meta: clear_facts) as it will remove the 'ansible_fact' but not the host variable."
+ type: bool
+ default: no
+ version_added: "2.4"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+    details: While the action plugin does do some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
+ support: partial
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ check_mode:
+ support: full
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ delegation:
+ details:
+      - while variable assignment can be delegated to a different host, the execution context is always the current inventory_hostname
+ - connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
+ support: partial
+ diff_mode:
+ support: none
+notes:
+ - Because of the nature of tasks, set_fact will produce 'static' values for a variable.
+ Unlike normal 'lazy' variables, the value gets evaluated and templated on assignment.
+ - Some boolean values (yes, no, true, false) will always be converted to boolean type,
+    unless C(DEFAULT_JINJA2_NATIVE) is enabled. This is done so that C(var=value) can create booleans;
+    otherwise it would only be able to create strings, but it also prevents using those values to create YAML strings.
+ Using the setting will restrict k=v to strings, but will allow you to specify string or boolean in YAML.
+ - "To create lists/arrays or dictionary/hashes use YAML notation C(var: [val1, val2])."
+ - Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name.
+seealso:
+- module: ansible.builtin.include_vars
+- ref: ansible_variable_precedence
+ description: More information related to variable precedence and which type of variable wins over others.
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Setting host facts using key=value pairs, this format can only create strings or booleans
+ ansible.builtin.set_fact: one_fact="something" other_fact="{{ local_var }}"
+
+- name: Setting host facts using complex arguments
+ ansible.builtin.set_fact:
+ one_fact: something
+ other_fact: "{{ local_var * 2 }}"
+ another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
+
+- name: Setting facts so that they will be persisted in the fact cache
+ ansible.builtin.set_fact:
+ one_fact: something
+ other_fact: "{{ local_var * 2 }}"
+ cacheable: yes
+
+- name: Creating list and dictionary variables
+ ansible.builtin.set_fact:
+ one_dict:
+ something: here
+ other: there
+ one_list:
+ - a
+ - b
+ - c
+# As of Ansible 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
+# to proper boolean values when using the key=value syntax, however it is still
+# recommended that booleans be set using the complex argument style:
+- name: Setting booleans using complex argument style
+ ansible.builtin.set_fact:
+ one_fact: yes
+ other_fact: no
+
+- name: Creating list and dictionary variables using 'shorthand' YAML
+ ansible.builtin.set_fact:
+ two_dict: {'something': here2, 'other': somewhere}
+ two_list: [1,2,3]
+'''
diff --git a/lib/ansible/modules/set_stats.py b/lib/ansible/modules/set_stats.py
new file mode 100644
index 0000000..16d7bfe
--- /dev/null
+++ b/lib/ansible/modules/set_stats.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible RedHat, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: set_stats
+short_description: Define and display stats for the current ansible run
+description:
+ - This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
+ - This module is also supported for Windows targets.
+author: Brian Coca (@bcoca)
+options:
+ data:
+ description:
+ - A dictionary of which each key represents a stat (or variable) you want to keep track of.
+ type: dict
+ required: true
+ per_host:
+ description:
+      - Whether the stats are per host or for all hosts in the run.
+ type: bool
+ default: no
+ aggregate:
+ description:
+      - Whether the provided value is aggregated with the existing stat (C(true)) or replaces it (C(false)).
+ type: bool
+ default: yes
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ details: While the action plugin does do some of the work it relies on the core engine to actually create the variables, that part cannot be overridden
+ support: partial
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ check_mode:
+ support: full
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+notes:
+  - In order for custom stats to be displayed, you must set C(show_custom_stats) in section C([defaults]) in C(ansible.cfg)
+    or define the environment variable C(ANSIBLE_SHOW_CUSTOM_STATS) as C(true). See the C(default) callback plugin for details.
+version_added: "2.3"
+'''
+
+EXAMPLES = r'''
+- name: Aggregating packages_installed stat per host
+ ansible.builtin.set_stats:
+ data:
+ packages_installed: 31
+ per_host: yes
+
+- name: Aggregating random stats for all hosts using complex arguments
+ ansible.builtin.set_stats:
+ data:
+ one_stat: 11
+ other_stat: "{{ local_var * 2 }}"
+ another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
+ per_host: no
+
+- name: Setting stats (not aggregating)
+ ansible.builtin.set_stats:
+ data:
+ the_answer: 42
+ aggregate: no
+'''
diff --git a/lib/ansible/modules/setup.py b/lib/ansible/modules/setup.py
new file mode 100644
index 0000000..df2a67f
--- /dev/null
+++ b/lib/ansible/modules/setup.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: setup
+version_added: historical
+short_description: Gathers facts about remote hosts
+options:
+ gather_subset:
+ version_added: "2.1"
+ description:
+ - "If supplied, restrict the additional facts collected to the given subset.
+ Possible values: C(all), C(all_ipv4_addresses), C(all_ipv6_addresses), C(apparmor), C(architecture),
+        C(caps), C(chroot), C(cmdline), C(date_time), C(default_ipv4), C(default_ipv6), C(devices),
+ C(distribution), C(distribution_major_version), C(distribution_release), C(distribution_version),
+ C(dns), C(effective_group_ids), C(effective_user_id), C(env), C(facter), C(fips), C(hardware),
+ C(interfaces), C(is_chroot), C(iscsi), C(kernel), C(local), C(lsb), C(machine), C(machine_id),
+ C(mounts), C(network), C(ohai), C(os_family), C(pkg_mgr), C(platform), C(processor), C(processor_cores),
+ C(processor_count), C(python), C(python_version), C(real_user_id), C(selinux), C(service_mgr),
+ C(ssh_host_key_dsa_public), C(ssh_host_key_ecdsa_public), C(ssh_host_key_ed25519_public),
+ C(ssh_host_key_rsa_public), C(ssh_host_pub_keys), C(ssh_pub_keys), C(system), C(system_capabilities),
+ C(system_capabilities_enforced), C(user), C(user_dir), C(user_gecos), C(user_gid), C(user_id),
+ C(user_shell), C(user_uid), C(virtual), C(virtualization_role), C(virtualization_type).
+        A list of values can be supplied to select a larger subset.
+ Values can also be used with an initial C(!) to specify that
+ that specific subset should not be collected. For instance:
+ C(!hardware,!network,!virtual,!ohai,!facter). If C(!all) is specified
+ then only the min subset is collected. To avoid collecting even the
+ min subset, specify C(!all,!min). To collect only specific facts,
+ use C(!all,!min), and specify the particular fact subsets.
+ Use the filter parameter if you do not want to display some collected
+ facts."
+ type: list
+ elements: str
+ default: "all"
+ gather_timeout:
+ version_added: "2.2"
+ description:
+ - Set the default timeout in seconds for individual fact gathering.
+ type: int
+ default: 10
+ filter:
+ version_added: "1.1"
+ description:
+      - If supplied, only return facts that match one of the shell-style
+        (fnmatch) patterns. An empty list basically means 'no filter'.
+        As of Ansible 2.11, the type has changed from string to list
+        and the default has become an empty list. A simple string is
+        still accepted and works as a single pattern. The behaviour
+        prior to Ansible 2.11 is thus preserved.
+ type: list
+ elements: str
+ default: []
+ fact_path:
+ version_added: "1.3"
+ description:
+      - Path used for local ansible facts (C(*.fact)) - files in this dir
+        will be run (if executable) and their results will be added to C(ansible_local) facts.
+        If a file is not executable, it is read instead.
+        The file/results format can be JSON or INI. The default C(fact_path) can be
+        specified in C(ansible.cfg) for when setup is automatically called as part of
+        C(gather_facts).
+        NOTE - For Windows clients, the results will be added to a variable named after the
+        local file (without extension suffix), rather than C(ansible_local).
+      - Since Ansible 2.1, Windows hosts can use C(fact_path). Make sure that this path
+        exists on the target host. Files in this path MUST be PowerShell scripts C(.ps1)
+        which output an object. This object will be formatted by Ansible as JSON, so the
+        script should output a raw hashtable, array, or other primitive object.
+ type: path
+ default: /etc/ansible/facts.d
+description:
+ - This module is automatically called by playbooks to gather useful
+ variables about remote hosts that can be used in playbooks. It can also be
+ executed directly by C(/usr/bin/ansible) to check what variables are
+ available to a host. Ansible provides many I(facts) about the system,
+ automatically.
+ - This module is also supported for Windows targets.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix, windows
+notes:
+ - More ansible facts will be added with successive releases. If I(facter) or
+ I(ohai) are installed, variables from these programs will also be snapshotted
+ into the JSON file for usage in templating. These variables are prefixed
+ with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
+    bubbled up to the caller. Using the ansible facts and choosing not to
+    install I(facter) and I(ohai) means you can avoid Ruby dependencies on your
+    remote systems. (See also M(community.general.facter) and M(community.general.ohai).)
+ - The filter option filters only the first level subkey below ansible_facts.
+ - If the target host is Windows, you will not currently have the ability to use
+ C(filter) as this is provided by a simpler implementation of the module.
+ - This module should be run with elevated privileges on BSD systems to gather facts like ansible_product_version.
+ - For more information about delegated facts,
+ please check U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_delegation.html#delegating-facts).
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = r"""
+# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
+# ansible all -m ansible.builtin.setup --tree /tmp/facts
+
+# Display only facts regarding memory found by ansible on all hosts and output them.
+# ansible all -m ansible.builtin.setup -a 'filter=ansible_*_mb'
+
+# Display only facts returned by facter.
+# ansible all -m ansible.builtin.setup -a 'filter=facter_*'
+
+# Collect only facts returned by facter.
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,facter'
+
+- name: Collect only facts returned by facter
+ ansible.builtin.setup:
+ gather_subset:
+ - '!all'
+ - '!<any valid subset>'
+ - facter
+
+- name: Filter and return only selected facts
+ ansible.builtin.setup:
+ filter:
+ - 'ansible_distribution'
+ - 'ansible_machine_id'
+ - 'ansible_*_mb'
+
+# Display only facts about certain interfaces.
+# ansible all -m ansible.builtin.setup -a 'filter=ansible_eth[0-2]'
+
+# Restrict additional gathered facts to network and virtual (includes default minimum facts)
+# ansible all -m ansible.builtin.setup -a 'gather_subset=network,virtual'
+
+# Collect only network and virtual (excludes default minimum facts)
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,network,virtual'
+
+# Do not call puppet facter or ohai even if present.
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!facter,!ohai'
+
+# Only collect the default minimum amount of facts:
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all'
+
+# Collect no facts, even the default minimum subset of facts:
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,!min'
+
+# Display facts from Windows hosts with custom facts stored in C:\custom_facts.
+# ansible windows -m ansible.builtin.setup -a "fact_path='c:\custom_facts'"
+
+# Gathers facts for the machines in the dbservers group (a.k.a Delegating facts)
+- hosts: app_servers
+ tasks:
+ - name: Gather facts from db servers
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups['dbservers'] }}"
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.facts import ansible_collector, default_collectors
+from ansible.module_utils.facts.collector import CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ gather_subset=dict(default=["all"], required=False, type='list', elements='str'),
+ gather_timeout=dict(default=10, required=False, type='int'),
+ filter=dict(default=[], required=False, type='list', elements='str'),
+ fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ gather_subset = module.params['gather_subset']
+ gather_timeout = module.params['gather_timeout']
+ filter_spec = module.params['filter']
+
+ # TODO: this mimics existing behavior where gather_subset=["!all"] actually means
+ # to collect nothing except for the below list
+ # TODO: decide what '!all' means, I lean towards making it mean none, but likely needs
+ # some tweaking on how gather_subset operations are performed
+ minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
+ 'distribution', 'dns', 'env', 'fips', 'local',
+ 'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
+ 'service_mgr', 'ssh_pub_keys', 'user'])
+
+ all_collector_classes = default_collectors.collectors
+
+ # rename namespace_name to root_key?
+ namespace = PrefixFactNamespace(namespace_name='ansible',
+ prefix='ansible_')
+
+ try:
+ fact_collector = ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
+ namespace=namespace,
+ filter_spec=filter_spec,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ minimal_gather_subset=minimal_gather_subset)
+ except (TypeError, CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep) as e:
+ # bad subset given, collector, idk, deps declared but not found
+ module.fail_json(msg=to_text(e))
+
+ facts_dict = fact_collector.collect(module=module)
+
+ module.exit_json(ansible_facts=facts_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/shell.py b/lib/ansible/modules/shell.py
new file mode 100644
index 0000000..52fda1b
--- /dev/null
+++ b/lib/ansible/modules/shell.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# There is no actual shell module source, when you use 'shell' in ansible,
+# it runs the 'command' module with special arguments and it behaves differently.
+# See the command source and the comment "#USE_SHELL".
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: shell
+short_description: Execute shell commands on targets
+description:
+ - The C(shell) module takes the command name followed by a list of space-delimited arguments.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - It is almost exactly like the M(ansible.builtin.command) module but runs
+ the command through a shell (C(/bin/sh)) on the remote node.
+ - For Windows targets, use the M(ansible.windows.win_shell) module instead.
+version_added: "0.2"
+options:
+ free_form:
+ description:
+ - The shell module takes a free form command to run, as a string.
+ - There is no actual parameter named 'free form'.
+ - See the examples on how to use this module.
+ type: str
+ cmd:
+ type: str
+ description:
+ - The command to run followed by optional arguments.
+ creates:
+ description:
+      - A filename; when it already exists, this step will B(not) be run.
+ type: path
+ removes:
+ description:
+      - A filename; when it does not exist, this step will B(not) be run.
+ type: path
+ version_added: "0.8"
+ chdir:
+ description:
+ - Change into this directory before running the command.
+ type: path
+ version_added: "0.6"
+ executable:
+ description:
+ - Change the shell used to execute the command.
+ - This expects an absolute path to the executable.
+ type: path
+ version_added: "0.9"
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: "2.4"
+ stdin_add_newline:
+ description:
+ - Whether to append a newline to stdin data.
+ type: bool
+ default: yes
+ version_added: "2.8"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.raw
+attributes:
+ check_mode:
+    details: While the command itself is arbitrary and cannot be subject to check mode semantics, it adds C(creates)/C(removes) options as a workaround
+ support: partial
+ diff_mode:
+ support: none
+ platform:
+ support: full
+ platforms: posix
+ raw:
+ support: full
+notes:
+ - If you want to execute a command securely and predictably, it may be
+ better to use the M(ansible.builtin.command) module instead. Best practices
+ when writing playbooks will follow the trend of using M(ansible.builtin.command)
+ unless the M(ansible.builtin.shell) module is explicitly required. When running ad-hoc
+ commands, use your best judgement.
+ - To sanitize any variables passed to the shell module, you should use
+ C({{ var | quote }}) instead of just C({{ var }}) to make sure they
+ do not include evil things like semicolons.
+ - An alternative to using inline shell scripts with this module is to use
+ the M(ansible.builtin.script) module possibly together with the M(ansible.builtin.template) module.
+ - For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
+seealso:
+- module: ansible.builtin.command
+- module: ansible.builtin.raw
+- module: ansible.builtin.script
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Execute the command in remote shell; stdout goes to the specified file on the remote
+ ansible.builtin.shell: somescript.sh >> somelog.txt
+
+- name: Change the working directory to somedir/ before executing the command
+ ansible.builtin.shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
+
+# You can also use the 'args' form to provide the options.
+- name: This command will change the working directory to somedir/ and will only run when somedir/somelog.txt doesn't exist
+ ansible.builtin.shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
+ creates: somelog.txt
+
+# You can also use the 'cmd' parameter instead of free form format.
+- name: This command will change the working directory to somedir/
+ ansible.builtin.shell:
+ cmd: ls -l | grep log
+ chdir: somedir/
+
+- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
+ ansible.builtin.shell: cat < /tmp/*txt
+ args:
+ executable: /bin/bash
+
+- name: Run a command using a templated variable (always use quote filter to avoid injection)
+ ansible.builtin.shell: cat {{ myfile|quote }}
+
+# You can use shell to run other executables to perform actions inline
+- name: Run expect to wait for a successful PXE boot via out-of-band CIMC
+ ansible.builtin.shell: |
+ set timeout 300
+ spawn ssh admin@{{ cimc_host }}
+
+ expect "password:"
+ send "{{ cimc_password }}\n"
+
+ expect "\n{{ cimc_name }}"
+ send "connect host\n"
+
+ expect "pxeboot.n12"
+ send "\n"
+
+ exit 0
+ args:
+ executable: /usr/bin/expect
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+msg:
+ description: changed
+ returned: always
+ type: bool
+ sample: True
+start:
+ description: The command execution start time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time.
+ returned: always
+ type: str
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master …'
+stderr:
+ description: The command standard error.
+ returned: always
+ type: str
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task.
+ returned: always
+ type: str
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines.
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
+stderr_lines:
+ description: The command standard error split in lines.
+ returned: always
+ type: list
+  sample: [u'ls: cannot access foo: No such file or directory', u'ls …']
+'''
diff --git a/lib/ansible/modules/slurp.py b/lib/ansible/modules/slurp.py
new file mode 100644
index 0000000..55abfeb
--- /dev/null
+++ b/lib/ansible/modules/slurp.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: slurp
+version_added: historical
+short_description: Slurps a file from remote nodes
+description:
+ - This module works like M(ansible.builtin.fetch). It is used for fetching a base64-
+ encoded blob containing the data in a remote file.
+ - This module is also supported for Windows targets.
+options:
+ src:
+ description:
+ - The file on the remote system to fetch. This I(must) be a file, not a directory.
+ type: path
+ required: true
+ aliases: [ path ]
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix, windows
+notes:
+  - This module returns an 'in memory' base64-encoded version of the file; take
+    into account that this will require at least twice as much RAM as the original file size.
+seealso:
+- module: ansible.builtin.fetch
+author:
+ - Ansible Core Team
+ - Michael DeHaan (@mpdehaan)
+'''
+
+EXAMPLES = r'''
+- name: Find out what the remote machine's mounts are
+ ansible.builtin.slurp:
+ src: /proc/mounts
+ register: mounts
+
+- name: Print returned information
+ ansible.builtin.debug:
+ msg: "{{ mounts['content'] | b64decode }}"
+
+# From the commandline, find the pid of the remote machine's sshd
+# $ ansible host -m ansible.builtin.slurp -a 'src=/var/run/sshd.pid'
+# host | SUCCESS => {
+# "changed": false,
+# "content": "MjE3OQo=",
+# "encoding": "base64",
+# "source": "/var/run/sshd.pid"
+# }
+# $ echo MjE3OQo= | base64 -d
+# 2179
+'''
+
+RETURN = r'''
+content:
+ description: Encoded file content
+ returned: success
+ type: str
+ sample: "MjE3OQo="
+encoding:
+ description: Type of encoding used for file
+ returned: success
+ type: str
+ sample: "base64"
+source:
+ description: Actual path of file slurped
+ returned: success
+ type: str
+ sample: "/var/run/sshd.pid"
+'''
+
+import base64
+import errno
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ ),
+ supports_check_mode=True,
+ )
+ source = module.params['src']
+
+ try:
+ with open(source, 'rb') as source_fh:
+ source_content = source_fh.read()
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ msg = "file not found: %s" % source
+ elif e.errno == errno.EACCES:
+ msg = "file is not readable: %s" % source
+ elif e.errno == errno.EISDIR:
+ msg = "source is a directory and must be a file: %s" % source
+ else:
+ msg = "unable to slurp file: %s" % to_native(e, errors='surrogate_then_replace')
+
+ module.fail_json(msg)
+
+ data = base64.b64encode(source_content)
+
+ module.exit_json(content=data, source=source, encoding='base64')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/stat.py b/lib/ansible/modules/stat.py
new file mode 100644
index 0000000..45ca78b
--- /dev/null
+++ b/lib/ansible/modules/stat.py
@@ -0,0 +1,560 @@
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: stat
+version_added: "1.3"
+short_description: Retrieve file or file system status
+description:
+ - Retrieves facts for a file similar to the Linux/Unix 'stat' command.
+ - For Windows targets, use the M(ansible.windows.win_stat) module instead.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of.
+ type: path
+ required: true
+ aliases: [ dest, name ]
+ follow:
+ description:
+ - Whether to follow symlinks.
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Whether to return a checksum of the file.
+ type: bool
+ default: yes
+ version_added: "1.8"
+ checksum_algorithm:
+ description:
+ - Algorithm to determine checksum of file.
+ - Will throw an error if the host is unable to use specified algorithm.
+ - The remote host has to support the hashing method specified, C(md5)
+ can be unavailable if the host is FIPS-140 compliant.
+ type: str
+ choices: [ md5, sha1, sha224, sha256, sha384, sha512 ]
+ default: sha1
+ aliases: [ checksum, checksum_algo ]
+ version_added: "2.0"
+ get_mime:
+ description:
+      - Use file magic and return data about the nature of the file. This uses
+        the 'file' utility found on most Linux/Unix systems.
+ - This will add both C(mime_type) and C(charset) fields to the return, if possible.
+ - In Ansible 2.3 this option changed from I(mime) to I(get_mime) and the default changed to C(true).
+ type: bool
+ default: yes
+ aliases: [ mime, mime_type, mime-type ]
+ version_added: "2.1"
+ get_attributes:
+ description:
+ - Get file attributes using lsattr tool if present.
+ type: bool
+ default: yes
+ aliases: [ attr, attributes ]
+ version_added: "2.3"
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.builtin.file
+- module: ansible.windows.win_stat
+author: Bruce Pennypacker (@bpennypacker)
+'''
+
+EXAMPLES = r'''
+# Obtain the stats of /etc/foo.conf, and check that the file still belongs
+# to 'root'. Fail otherwise.
+- name: Get stats of a file
+ ansible.builtin.stat:
+ path: /etc/foo.conf
+ register: st
+- name: Fail if the file does not belong to 'root'
+ ansible.builtin.fail:
+ msg: "Whoops! file ownership has changed"
+ when: st.stat.pw_name != 'root'
+
+# Determine if a path exists and is a symlink. Note that if the path does
+# not exist, and we test sym.stat.islnk, it will fail with an error.
+# Therefore, we must first test whether it is defined.
+# Run this to understand the structure; the skipped tasks do not pass the
+# check performed by 'when'
+- name: Get stats of the FS object
+ ansible.builtin.stat:
+ path: /path/to/something
+ register: sym
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "islnk isn't defined (path doesn't exist)"
+ when: sym.stat.islnk is not defined
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "islnk is defined (path must exist)"
+ when: sym.stat.islnk is defined
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "Path exists and is a symlink"
+ when: sym.stat.islnk is defined and sym.stat.islnk
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "Path exists and isn't a symlink"
+    when: sym.stat.islnk is defined and not sym.stat.islnk
+
+
+# Determine if a path exists and is a directory. Note that we need to test
+# both that p.stat.isdir actually exists, and also that it's set to true.
+- name: Get stats of the FS object
+ ansible.builtin.stat:
+ path: /path/to/something
+ register: p
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "Path exists and is a directory"
+ when: p.stat.isdir is defined and p.stat.isdir
+
+- name: Do not calculate the checksum
+ ansible.builtin.stat:
+ path: /path/to/myhugefile
+ get_checksum: no
+
+- name: Use sha256 to calculate the checksum
+ ansible.builtin.stat:
+ path: /path/to/something
+ checksum_algorithm: sha256
+'''
+
+RETURN = r'''
+stat:
+ description: Dictionary containing all the stat data, some platforms might add additional fields.
+ returned: success
+ type: complex
+ contains:
+ exists:
+ description: If the destination path actually exists or not
+ returned: success
+ type: bool
+ sample: True
+ path:
+ description: The full path of the file/object to get the facts of
+ returned: success and if path exists
+ type: str
+ sample: '/path/to/file'
+ mode:
+ description: Unix permissions of the file in octal representation as a string
+ returned: success, path exists and user can read stats
+ type: str
+ sample: 1755
+ isdir:
+ description: Tells you if the path is a directory
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ ischr:
+ description: Tells you if the path is a character device
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isblk:
+ description: Tells you if the path is a block device
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isreg:
+ description: Tells you if the path is a regular file
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ isfifo:
+ description: Tells you if the path is a named pipe
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ islnk:
+ description: Tells you if the path is a symbolic link
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ issock:
+ description: Tells you if the path is a unix domain socket
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ uid:
+ description: Numeric id representing the file owner
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1003
+ gid:
+ description: Numeric id representing the group of the owner
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1003
+ size:
+ description: Size in bytes for a plain file, amount of data for some special files
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 203
+ inode:
+ description: Inode number of the path
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 12758
+ dev:
+ description: Device the inode resides on
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 33
+ nlink:
+ description: Number of links to the inode (hard links)
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1
+ atime:
+ description: Time of last access
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ mtime:
+ description: Time of last modification
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ ctime:
+ description: Time of last metadata update or creation (depends on OS)
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ wusr:
+ description: Tells you if the owner has write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ rusr:
+ description: Tells you if the owner has read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xusr:
+ description: Tells you if the owner has execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ wgrp:
+ description: Tells you if the owner's group has write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ rgrp:
+ description: Tells you if the owner's group has read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xgrp:
+ description: Tells you if the owner's group has execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ woth:
+ description: Tells you if others have write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ roth:
+ description: Tells you if others have read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xoth:
+ description: Tells you if others have execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ isuid:
+ description: Tells you if the invoking user's id matches the owner's id
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isgid:
+ description: Tells you if the invoking user's group id matches the owner's group id
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ lnk_source:
+ description: Target of the symlink normalized for the remote filesystem
+ returned: success, path exists and user can read stats and the path is a symbolic link
+ type: str
+ sample: /home/foobar/21102015-1445431274-908472971
+ lnk_target:
+ description: Target of the symlink. Note that relative paths remain relative
+ returned: success, path exists and user can read stats and the path is a symbolic link
+ type: str
+ sample: ../foobar/21102015-1445431274-908472971
+ version_added: 2.4
+ md5:
+ description: md5 hash of the file; this will be removed in Ansible 2.9 in
+ favor of the checksum return value
+ returned: success, path exists and user can read stats and path
+ supports hashing and md5 is supported
+ type: str
+ sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
+ checksum:
+ description: hash of the file
+ returned: success, path exists, user can read stats, path supports
+ hashing and supplied checksum algorithm is available
+ type: str
+ sample: 50ba294cdf28c0d5bcde25708df53346825a429f
+ pw_name:
+ description: User name of owner
+ returned: success, path exists, user can read stats, owner name can be looked up and installed python supports it
+ type: str
+ sample: httpd
+ gr_name:
+ description: Group name of owner
+ returned: success, path exists, user can read stats, owner group can be looked up and installed python supports it
+ type: str
+ sample: www-data
+ mimetype:
+ description: file magic data or mime-type
+ returned: success, path exists and user can read stats and
+ installed python supports it and the I(mime) option was C(true);
+ will return C(unknown) on error
+ type: str
+ sample: application/pdf; charset=binary
+ charset:
+ description: file character set or encoding
+ returned: success, path exists and user can read stats and
+ installed python supports it and the I(mime) option was C(true);
+ will return C(unknown) on error
+ type: str
+ sample: us-ascii
+ readable:
+ description: Tells you if the invoking user has the right to read the path
+ returned: success, path exists and user can read the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ writeable:
+ description: Tells you if the invoking user has the right to write the path
+ returned: success, path exists and user can write the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ executable:
+ description: Tells you if the invoking user has execute permission on the path
+ returned: success, path exists and user can execute the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ attributes:
+ description: list of file attributes
+ returned: success, path exists and user can execute the path
+ type: list
+ sample: [ immutable, extent ]
+ version_added: 2.3
+ version:
+ description: The version/generation attribute of a file according to the filesystem
+ returned: success, path exists, user can execute the path, lsattr is available and the filesystem supports it
+ type: str
+ sample: "381700746"
+ version_added: 2.3
+'''
+
+import errno
+import grp
+import os
+import pwd
+import stat
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def format_output(module, path, st):
+ mode = st.st_mode
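+ # Illustrative example: for a regular file whose st_mode is 0o100644,
+ # stat.S_IMODE(mode) == 0o644, so 'mode' is reported as "0644"; the r/w
+ # bits for the owner and the r bits for group/other come out True, and
+ # all of the x bits come out False.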
+
+ # back to ansible
+ output = dict(
+ exists=True,
+ path=path,
+ mode="%04o" % stat.S_IMODE(mode),
+ isdir=stat.S_ISDIR(mode),
+ ischr=stat.S_ISCHR(mode),
+ isblk=stat.S_ISBLK(mode),
+ isreg=stat.S_ISREG(mode),
+ isfifo=stat.S_ISFIFO(mode),
+ islnk=stat.S_ISLNK(mode),
+ issock=stat.S_ISSOCK(mode),
+ uid=st.st_uid,
+ gid=st.st_gid,
+ size=st.st_size,
+ inode=st.st_ino,
+ dev=st.st_dev,
+ nlink=st.st_nlink,
+ atime=st.st_atime,
+ mtime=st.st_mtime,
+ ctime=st.st_ctime,
+ wusr=bool(mode & stat.S_IWUSR),
+ rusr=bool(mode & stat.S_IRUSR),
+ xusr=bool(mode & stat.S_IXUSR),
+ wgrp=bool(mode & stat.S_IWGRP),
+ rgrp=bool(mode & stat.S_IRGRP),
+ xgrp=bool(mode & stat.S_IXGRP),
+ woth=bool(mode & stat.S_IWOTH),
+ roth=bool(mode & stat.S_IROTH),
+ xoth=bool(mode & stat.S_IXOTH),
+ isuid=bool(mode & stat.S_ISUID),
+ isgid=bool(mode & stat.S_ISGID),
+ )
+
+ # Platform dependent flags:
+ for other in [
+ # Some Linux
+ ('st_blocks', 'blocks'),
+ ('st_blksize', 'block_size'),
+ ('st_rdev', 'device_type'),
+ ('st_flags', 'flags'),
+ # Some Berkeley-based
+ ('st_gen', 'generation'),
+ ('st_birthtime', 'birthtime'),
+ # RISC OS
+ ('st_ftype', 'file_type'),
+ ('st_attrs', 'attrs'),
+ ('st_obtype', 'object_type'),
+ # macOS
+ ('st_rsize', 'real_size'),
+ ('st_creator', 'creator'),
+ ('st_type', 'file_type'),
+ ]:
+ if hasattr(st, other[0]):
+ output[other[1]] = getattr(st, other[0])
+
+ return output
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'name']),
+ follow=dict(type='bool', default=False),
+ get_md5=dict(type='bool', default=False),
+ get_checksum=dict(type='bool', default=True),
+ get_mime=dict(type='bool', default=True, aliases=['mime', 'mime_type', 'mime-type']),
+ get_attributes=dict(type='bool', default=True, aliases=['attr', 'attributes']),
+ checksum_algorithm=dict(type='str', default='sha1',
+ choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
+ aliases=['checksum', 'checksum_algo']),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params.get('path')
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ follow = module.params.get('follow')
+ get_mime = module.params.get('get_mime')
+ get_attr = module.params.get('get_attributes')
+ get_checksum = module.params.get('get_checksum')
+ checksum_algorithm = module.params.get('checksum_algorithm')
+
+ # NOTE: undocumented option since 2.9; to be removed at a later date if possible (3.0+).
+ # No real reason for keeping it other than fear we may break older content.
+ get_md5 = module.params.get('get_md5')
+
+ # main stat data
+ try:
+ if follow:
+ st = os.stat(b_path)
+ else:
+ st = os.lstat(b_path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ output = {'exists': False}
+ module.exit_json(changed=False, stat=output)
+
+ module.fail_json(msg=e.strerror)
+
+ # process base results
+ output = format_output(module, path, st)
+
+ # resolved permissions
+ for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
+ output[perm[0]] = os.access(b_path, perm[1])
+
+ # symlink info
+ if output.get('islnk'):
+ output['lnk_source'] = os.path.realpath(b_path)
+ output['lnk_target'] = os.readlink(b_path)
+
+ try: # user data
+ pw = pwd.getpwuid(st.st_uid)
+ output['pw_name'] = pw.pw_name
+ except (TypeError, KeyError):
+ pass
+
+ try: # group data
+ grp_info = grp.getgrgid(st.st_gid)
+ output['gr_name'] = grp_info.gr_name
+ except (KeyError, ValueError, OverflowError):
+ pass
+
+ # checksums
+ if output.get('isreg') and output.get('readable'):
+
+ # NOTE: see above about get_md5
+ if get_md5:
+ # Will fail on FIPS-140 compliant systems
+ try:
+ output['md5'] = module.md5(b_path)
+ except ValueError:
+ output['md5'] = None
+
+ if get_checksum:
+ output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)
+
+ # try to get mime data if requested
+ if get_mime:
+ output['mimetype'] = output['charset'] = 'unknown'
+ mimecmd = module.get_bin_path('file')
+ if mimecmd:
+ mimecmd = [mimecmd, '--mime-type', '--mime-encoding', b_path]
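+ # Typical 'file' output looks like "/etc/passwd: text/plain; charset=us-ascii";
+ # rsplit(':', 1) keeps everything after the last colon (safe even if the path
+ # itself contains colons), which is then split on ';' into the mime type and
+ # the "charset=..." field.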
+ try:
+ rc, out, err = module.run_command(mimecmd)
+ if rc == 0:
+ mimetype, charset = out.rsplit(':', 1)[1].split(';')
+ output['mimetype'] = mimetype.strip()
+ output['charset'] = charset.split('=')[1].strip()
+ except Exception:
+ pass
+
+ # try to get attr data
+ if get_attr:
+ output['version'] = None
+ output['attributes'] = []
+ output['attr_flags'] = ''
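+ # get_file_attributes() relies on lsattr; on filesystems without attribute
+ # support the defaults above are returned unchanged.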
+ out = module.get_file_attributes(b_path)
+ for x in ('version', 'attributes', 'attr_flags'):
+ if x in out:
+ output[x] = out[x]
+
+ module.exit_json(changed=False, stat=output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/subversion.py b/lib/ansible/modules/subversion.py
new file mode 100644
index 0000000..68aacfd
--- /dev/null
+++ b/lib/ansible/modules/subversion.py
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: subversion
+short_description: Deploys a subversion repository
+description:
+ - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
+version_added: "0.7"
+author:
+- Dane Summers (@dsummersl) <njharman@gmail.com>
+options:
+ repo:
+ description:
+ - The subversion URL to the repository.
+ type: str
+ required: true
+ aliases: [ name, repository ]
+ dest:
+ description:
+ - Absolute path where the repository should be deployed.
+ - The destination directory must be specified unless I(checkout=no), I(update=no), and I(export=no).
+ type: path
+ revision:
+ description:
+ - Specific revision to checkout.
+ type: str
+ default: HEAD
+ aliases: [ rev, version ]
+ force:
+ description:
+ - If C(true), modified files will be discarded. If C(false), the module will fail if it encounters modified files.
+ Prior to 1.9 the default was C(true).
+ type: bool
+ default: "no"
+ in_place:
+ description:
+ - If the directory exists, then the working copy will be checked out over the top using
+ C(svn checkout --force); if I(force) is specified, existing files with different content are reverted.
+ type: bool
+ default: "no"
+ version_added: "2.6"
+ username:
+ description:
+ - C(--username) parameter passed to svn.
+ type: str
+ password:
+ description:
+ - C(--password) parameter passed to svn when svn is less than version 1.10.0. This is not secure and
+ the password will be leaked to argv.
+ - C(--password-from-stdin) parameter when svn is greater or equal to version 1.10.0.
+ type: str
+ executable:
+ description:
+ - Path to svn executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ type: path
+ version_added: "1.4"
+ checkout:
+ description:
+ - If C(false), do not check out the repository if it does not exist locally.
+ type: bool
+ default: "yes"
+ version_added: "2.3"
+ update:
+ description:
+ - If C(false), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: "yes"
+ version_added: "2.3"
+ export:
+ description:
+ - If C(true), do export instead of checkout/update.
+ type: bool
+ default: "no"
+ version_added: "1.6"
+ switch:
+ description:
+ - If C(false), do not call svn switch before update.
+ default: "yes"
+ version_added: "2.0"
+ type: bool
+ validate_certs:
+ description:
+ - If C(false), passes the C(--trust-server-cert) flag to svn.
+ - If C(true), does not pass the flag.
+ default: "no"
+ version_added: "2.11"
+ type: bool
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - This module does not handle externals.
+
+requirements:
+ - subversion (the command line tool with C(svn) entrypoint)
+'''
+
+EXAMPLES = '''
+- name: Checkout subversion repository to specified folder
+ ansible.builtin.subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
+
+- name: Export subversion directory to folder
+ ansible.builtin.subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/export
+ export: yes
+
+- name: Get information about the repository whether or not it has already been checked out locally
+ ansible.builtin.subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
+ checkout: no
+ update: no
+'''
+
+RETURN = r'''#'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.compat.version import LooseVersion
+
+
+class Subversion(object):
+
+ # Example text matched by the regexp:
+ # Révision : 1889134
+ # 版本: 1889134
+ # Revision: 1889134
+ REVISION_RE = r'^\w+\s?:\s+\d+$'
+
+ def __init__(self, module, dest, repo, revision, username, password, svn_path, validate_certs):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.username = username
+ self.password = password
+ self.svn_path = svn_path
+ self.validate_certs = validate_certs
+
+ def has_option_password_from_stdin(self):
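+ # 'svn --version --quiet' prints only the bare version string, for example
+ # "1.10.4", which LooseVersion can compare directly.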
+ rc, version, err = self.module.run_command([self.svn_path, '--version', '--quiet'], check_rc=True)
+ return LooseVersion(version) >= LooseVersion('1.10.0')
+
+ def _exec(self, args, check_rc=True):
+ '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''
+ bits = [
+ self.svn_path,
+ '--non-interactive',
+ '--no-auth-cache',
+ ]
+ if not self.validate_certs:
+ bits.append('--trust-server-cert')
+ stdin_data = None
+ if self.username:
+ bits.extend(["--username", self.username])
+ if self.password:
+ if self.has_option_password_from_stdin():
+ bits.append("--password-from-stdin")
+ stdin_data = self.password
+ else:
+ self.module.warn("The authentication provided will be used on the svn command line and is not secure. "
+ "To securely pass credentials, upgrade svn to version 1.10.0 or greater.")
+ bits.extend(["--password", self.password])
+ bits.extend(args)
+ rc, out, err = self.module.run_command(bits, check_rc, data=stdin_data)
+
+ if check_rc:
+ return out.splitlines()
+ else:
+ return rc
+
+ def is_svn_repo(self):
+ '''Checks if path is an SVN repo.'''
+ rc = self._exec(["info", self.dest], check_rc=False)
+ return rc == 0
+
+ def checkout(self, force=False):
+ '''Creates new svn working directory if it does not already exist.'''
+ cmd = ["checkout"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+ self._exec(cmd)
+
+ def export(self, force=False):
+ '''Export svn repo to directory'''
+ cmd = ["export"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+
+ self._exec(cmd)
+
+ def switch(self):
+ '''Change working directory's repo.'''
+ # switch to ensure we are pointing at correct repo.
+ # it also updates!
+ output = self._exec(["switch", "--revision", self.revision, self.repo, self.dest])
+ for line in output:
+ if re.search(r'^[ABDUCGE]\s', line):
+ return True
+ return False
+
+ def update(self):
+ '''Update existing svn working directory.'''
+ output = self._exec(["update", "-r", self.revision, self.dest])
+
+ for line in output:
+ if re.search(r'^[ABDUCGE]\s', line):
+ return True
+ return False
+
+ def revert(self):
+ '''Revert svn working directory.'''
+ output = self._exec(["revert", "-R", self.dest])
+ for line in output:
+ if re.search(r'^Reverted ', line) is None:
+ return True
+ return False
+
+ def get_revision(self):
+ '''Revision and URL of subversion working directory.'''
+ text = '\n'.join(self._exec(["info", self.dest]))
+ rev = re.search(self.REVISION_RE, text, re.MULTILINE)
+ if rev:
+ rev = rev.group(0)
+ else:
+ rev = 'Unable to get revision'
+
+ url = re.search(r'^URL\s?:.*$', text, re.MULTILINE)
+ if url:
+ url = url.group(0)
+ else:
+ url = 'Unable to get URL'
+
+ return rev, url
+
+ def get_remote_revision(self):
+ '''Revision of the remote subversion repository.'''
+ text = '\n'.join(self._exec(["info", self.repo]))
+ rev = re.search(self.REVISION_RE, text, re.MULTILINE)
+ if rev:
+ rev = rev.group(0)
+ else:
+ rev = 'Unable to get remote revision'
+ return rev
+
+ def has_local_mods(self):
+ '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
+ lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
+ # The --quiet option will return only modified files.
+ # Match only revisioned files, i.e. ignore status '?'.
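+ # Illustrative '--quiet' output: a line such as "M foo/bar.c" counts as a
+ # local modification, while an externals line starting with 'X' is skipped.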
+ regex = re.compile(r'^[^?X]')
+ # Has local mods if more than 0 modified revisioned files.
+ return len(list(filter(regex.match, lines))) > 0
+
+ def needs_update(self):
+ curr, url = self.get_revision()
+ out2 = '\n'.join(self._exec(["info", "-r", self.revision, self.dest]))
+ head = re.search(self.REVISION_RE, out2, re.MULTILINE)
+ if head:
+ head = head.group(0)
+ else:
+ head = 'Unable to get revision'
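+ # Both strings look like "Revision: 1889134" (illustrative value); compare
+ # the numeric part after the colon.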
+ rev1 = int(curr.split(':')[1].strip())
+ rev2 = int(head.split(':')[1].strip())
+ change = False
+ if rev1 < rev2:
+ change = True
+ return change, curr, head
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path'),
+ repo=dict(type='str', required=True, aliases=['name', 'repository']),
+ revision=dict(type='str', default='HEAD', aliases=['rev', 'version']),
+ force=dict(type='bool', default=False),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ executable=dict(type='path'),
+ export=dict(type='bool', default=False),
+ checkout=dict(type='bool', default=True),
+ update=dict(type='bool', default=True),
+ switch=dict(type='bool', default=True),
+ in_place=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ dest = module.params['dest']
+ repo = module.params['repo']
+ revision = module.params['revision']
+ force = module.params['force']
+ username = module.params['username']
+ password = module.params['password']
+ svn_path = module.params['executable'] or module.get_bin_path('svn', True)
+ export = module.params['export']
+ switch = module.params['switch']
+ checkout = module.params['checkout']
+ update = module.params['update']
+ in_place = module.params['in_place']
+ validate_certs = module.params['validate_certs']
+
+ # We screen-scrape a huge amount of svn command output, so use the best
+ # parsable locale any time we call run_command()
+ locale = get_best_parsable_locale(module)
+ module.run_command_environ_update = dict(LANG=locale, LC_MESSAGES=locale)
+
+ if not dest and (checkout or update or export):
+ module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
+
+ svn = Subversion(module, dest, repo, revision, username, password, svn_path, validate_certs)
+
+ if not export and not update and not checkout:
+ module.exit_json(changed=False, after=svn.get_remote_revision())
+ if export or not os.path.exists(dest):
+ before = None
+ local_mods = False
+ if module.check_mode:
+ module.exit_json(changed=True)
+ elif not export and not checkout:
+ module.exit_json(changed=False)
+ if not export and checkout:
+ svn.checkout()
+ files_changed = True
+ else:
+ svn.export(force=force)
+ files_changed = True
+ elif svn.is_svn_repo():
+ # Order matters. Need to get local mods before switch to avoid false
+ # positives. Need to switch before revert to ensure we are reverting to
+ # correct repo.
+ if not update:
+ module.exit_json(changed=False)
+ if module.check_mode:
+ if svn.has_local_mods() and not force:
+ module.fail_json(msg="ERROR: modified files exist in the repository.")
+ check, before, after = svn.needs_update()
+ module.exit_json(changed=check, before=before, after=after)
+ files_changed = False
+ before = svn.get_revision()
+ local_mods = svn.has_local_mods()
+ if switch:
+ files_changed = svn.switch() or files_changed
+ if local_mods:
+ if force:
+ files_changed = svn.revert() or files_changed
+ else:
+ module.fail_json(msg="ERROR: modified files exist in the repository.")
+ files_changed = svn.update() or files_changed
+ elif in_place:
+ before = None
+ svn.checkout(force=True)
+ files_changed = True
+ local_mods = svn.has_local_mods()
+ if local_mods and force:
+ svn.revert()
+ else:
+ module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest,))
+
+ if export:
+ module.exit_json(changed=True)
+ else:
+ after = svn.get_revision()
+ changed = files_changed or local_mods
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/systemd.py b/lib/ansible/modules/systemd.py
new file mode 100644
index 0000000..3580fa5
--- /dev/null
+++ b/lib/ansible/modules/systemd.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: systemd_service
+author:
+ - Ansible Core Team
+version_added: "2.2"
+short_description: Manage systemd units
+description:
+ - Controls systemd units (services, timers, and so on) on remote hosts.
+options:
+ name:
+ description:
+ - Name of the unit. This parameter takes the name of exactly one unit to work with.
+ - When no extension is given, a C(.service) extension is implied, as systemd does.
+ - When used in a chroot environment you always need to specify the name of the unit with the extension. For example, C(crond.service).
+ type: str
+ aliases: [ service, unit ]
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the unit. C(reloaded) will always reload.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the unit should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ force:
+ description:
+ - Whether to override existing symlinks.
+ type: bool
+ version_added: 2.6
+ masked:
+ description:
+ - Whether the unit should be masked or not. A masked unit is impossible to start.
+ type: bool
+ daemon_reload:
+ description:
+ - Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
+ - When set to C(true), runs daemon-reload even if the module does not start or stop anything.
+ type: bool
+ default: no
+ aliases: [ daemon-reload ]
+ daemon_reexec:
+ description:
+ - Run daemon-reexec before doing any other operations; the systemd manager will serialize its state, re-execute itself, and deserialize the state again.
+ type: bool
+ default: no
+ aliases: [ daemon-reexec ]
+ version_added: "2.8"
+ scope:
+ description:
+ - Run systemctl within a given service manager scope, either as the default system scope C(system),
+ the current user's scope C(user), or the scope of all users C(global).
+ - "For systemd to work with 'user', the executing user must have its own instance of dbus started and accessible (systemd requirement)."
+ - "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
+ Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
+ - The user must have access, normally given via setting the C(XDG_RUNTIME_DIR) variable, see example below.
+
+ type: str
+ choices: [ system, user, global ]
+ default: system
+ version_added: "2.7"
+ no_block:
+ description:
+ - Do not synchronously wait for the requested operation to finish.
+ Enqueued job will continue without Ansible blocking on its completion.
+ type: bool
+ default: no
+ version_added: "2.3"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - Since 2.4, one of the following options is required: C(state), C(enabled), C(masked), C(daemon_reload), (C(daemon_reexec) since 2.8),
+ and all except C(daemon_reload) and (C(daemon_reexec) since 2.8) also require C(name).
+ - Before 2.4 you always required C(name).
+ - Globs are not supported in name, for example C(postgres*.service).
+ - The service names might vary by specific OS/distribution.
+requirements:
+ - A system managed by systemd.
+'''
+
+EXAMPLES = '''
+- name: Make sure a service unit is running
+ ansible.builtin.systemd:
+ state: started
+ name: httpd
+
+- name: Stop service cron on debian, if running
+ ansible.builtin.systemd:
+ name: cron
+ state: stopped
+
+- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
+ ansible.builtin.systemd:
+ state: restarted
+ daemon_reload: true
+ name: crond
+
+- name: Reload service httpd, in all cases
+ ansible.builtin.systemd:
+ name: httpd.service
+ state: reloaded
+
+- name: Enable service httpd and ensure it is not masked
+ ansible.builtin.systemd:
+ name: httpd
+ enabled: true
+ masked: no
+
+- name: Enable a timer unit for dnf-automatic
+ ansible.builtin.systemd:
+ name: dnf-automatic.timer
+ state: started
+ enabled: true
+
+- name: Just force systemd to reread configs (2.4 and above)
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Just force systemd to re-execute itself (2.8 and above)
+ ansible.builtin.systemd:
+ daemon_reexec: true
+
+- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
+ ansible.builtin.systemd:
+ name: myservice
+ state: started
+ scope: user
+ environment:
+ XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
+'''
+
+RETURN = '''
+status:
+ description: A dictionary with the key=value pairs returned from C(systemctl show).
+ returned: success
+ type: complex
+ sample: {
+ "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ActiveEnterTimestampMonotonic": "8135942",
+ "ActiveExitTimestampMonotonic": "0",
+ "ActiveState": "active",
+ "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
+ "AllowIsolate": "no",
+ "Before": "shutdown.target multi-user.target",
+ "BlockIOAccounting": "no",
+ "BlockIOWeight": "1000",
+ "CPUAccounting": "no",
+ "CPUSchedulingPolicy": "0",
+ "CPUSchedulingPriority": "0",
+ "CPUSchedulingResetOnFork": "no",
+ "CPUShares": "1024",
+ "CanIsolate": "no",
+ "CanReload": "yes",
+ "CanStart": "yes",
+ "CanStop": "yes",
+ "CapabilityBoundingSet": "18446744073709551615",
+ "ConditionResult": "yes",
+ "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ConditionTimestampMonotonic": "7902742",
+ "Conflicts": "shutdown.target",
+ "ControlGroup": "/system.slice/crond.service",
+ "ControlPID": "0",
+ "DefaultDependencies": "yes",
+ "Delegate": "no",
+ "Description": "Command Scheduler",
+ "DevicePolicy": "auto",
+ "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
+ "ExecMainCode": "0",
+ "ExecMainExitTimestampMonotonic": "0",
+ "ExecMainPID": "595",
+ "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ExecMainStartTimestampMonotonic": "8134990",
+ "ExecMainStatus": "0",
+ "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "FragmentPath": "/usr/lib/systemd/system/crond.service",
+ "GuessMainPID": "yes",
+ "IOScheduling": "0",
+ "Id": "crond.service",
+ "IgnoreOnIsolate": "no",
+ "IgnoreOnSnapshot": "no",
+ "IgnoreSIGPIPE": "yes",
+ "InactiveEnterTimestampMonotonic": "0",
+ "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "InactiveExitTimestampMonotonic": "8135942",
+ "JobTimeoutUSec": "0",
+ "KillMode": "process",
+ "KillSignal": "15",
+ "LimitAS": "18446744073709551615",
+ "LimitCORE": "18446744073709551615",
+ "LimitCPU": "18446744073709551615",
+ "LimitDATA": "18446744073709551615",
+ "LimitFSIZE": "18446744073709551615",
+ "LimitLOCKS": "18446744073709551615",
+ "LimitMEMLOCK": "65536",
+ "LimitMSGQUEUE": "819200",
+ "LimitNICE": "0",
+ "LimitNOFILE": "4096",
+ "LimitNPROC": "3902",
+ "LimitRSS": "18446744073709551615",
+ "LimitRTPRIO": "0",
+ "LimitRTTIME": "18446744073709551615",
+ "LimitSIGPENDING": "3902",
+ "LimitSTACK": "18446744073709551615",
+ "LoadState": "loaded",
+ "MainPID": "595",
+ "MemoryAccounting": "no",
+ "MemoryLimit": "18446744073709551615",
+ "MountFlags": "0",
+ "Names": "crond.service",
+ "NeedDaemonReload": "no",
+ "Nice": "0",
+ "NoNewPrivileges": "no",
+ "NonBlocking": "no",
+ "NotifyAccess": "none",
+ "OOMScoreAdjust": "0",
+ "OnFailureIsolate": "no",
+ "PermissionsStartOnly": "no",
+ "PrivateNetwork": "no",
+ "PrivateTmp": "no",
+ "RefuseManualStart": "no",
+ "RefuseManualStop": "no",
+ "RemainAfterExit": "no",
+ "Requires": "basic.target",
+ "Restart": "no",
+ "RestartUSec": "100ms",
+ "Result": "success",
+ "RootDirectoryStartOnly": "no",
+ "SameProcessGroup": "no",
+ "SecureBits": "0",
+ "SendSIGHUP": "no",
+ "SendSIGKILL": "yes",
+ "Slice": "system.slice",
+ "StandardError": "inherit",
+ "StandardInput": "null",
+ "StandardOutput": "journal",
+ "StartLimitAction": "none",
+ "StartLimitBurst": "5",
+ "StartLimitInterval": "10000000",
+ "StatusErrno": "0",
+ "StopWhenUnneeded": "no",
+ "SubState": "running",
+ "SyslogLevelPrefix": "yes",
+ "SyslogPriority": "30",
+ "TTYReset": "no",
+ "TTYVHangup": "no",
+ "TTYVTDisallocate": "no",
+ "TimeoutStartUSec": "1min 30s",
+ "TimeoutStopUSec": "1min 30s",
+ "TimerSlackNSec": "50000",
+ "Transient": "no",
+ "Type": "simple",
+ "UMask": "0022",
+ "UnitFileState": "enabled",
+ "WantedBy": "multi-user.target",
+ "Wants": "system.slice",
+ "WatchdogTimestampMonotonic": "0",
+ "WatchdogUSec": "0",
+ }
+''' # NOQA
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.system.chroot import is_chroot
+from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def is_running_service(service_status):
+ return service_status['ActiveState'] in set(['active', 'activating'])
+
+
+def is_deactivating_service(service_status):
+ return service_status['ActiveState'] in set(['deactivating'])
+
+
+def request_was_ignored(out):
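+ # In a chroot, or with systemd offline, systemctl prints a message such as
+ # "Running in chroot, ignoring request." instead of key=value output.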
+ return '=' not in out and ('ignoring request' in out or 'ignoring command' in out)
+
+
+def parse_systemctl_show(lines):
+ # The output of 'systemctl show' can contain values that span multiple lines. At first glance it
+ # appears that such values are always surrounded by {}, so the previous version of this code
+ # assumed that any value starting with { was a multi-line value; it would then consume lines
+ # until it saw a line that ended with }. However, it is possible to have a single-line value
+ # that starts with { but does not end with } (this could happen in the value for Description=,
+ # for example), and the previous version of this code would then consume all remaining lines as
+ # part of that value. Cryptically, this would lead to Ansible reporting that the service file
+ # couldn't be found.
+ #
+ # To avoid this issue, the following code only accepts multi-line values for keys whose names
+ # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
+ # span multiple lines.
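+ #
+ # Illustrative input (assumed sample):
+ #   Id=crond.service
+ #   ExecStart={ path=/usr/sbin/crond ;
+ #   argv[]=/usr/sbin/crond -n }
+ # parses to {'Id': 'crond.service', 'ExecStart': '{ path=/usr/sbin/crond ;\nargv[]=/usr/sbin/crond -n }'}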
+ parsed = {}
+ multival = []
+ k = None
+ for line in lines:
+ if k is None:
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k.startswith('Exec') and v.lstrip().startswith('{'):
+ if not v.rstrip().endswith('}'):
+ multival.append(v)
+ continue
+ parsed[k] = v.strip()
+ k = None
+ else:
+ multival.append(line)
+ if line.rstrip().endswith('}'):
+ parsed[k] = '\n'.join(multival).strip()
+ multival = []
+ k = None
+ return parsed
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', aliases=['service', 'unit']),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ force=dict(type='bool'),
+ masked=dict(type='bool'),
+ daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
+ daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
+ scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
+ no_block=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
+ required_by=dict(
+ state=('name', ),
+ enabled=('name', ),
+ masked=('name', ),
+ ),
+ )
+
+ unit = module.params['name']
+ if unit is not None:
+ for globpattern in (r"*", r"?", r"["):
+ if globpattern in unit:
+ module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
+
+ systemctl = module.get_bin_path('systemctl', True)
+
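+ # Assumed rationale: user-scope systemctl needs XDG_RUNTIME_DIR to locate the
+ # user's D-Bus socket, so fall back to the conventional /run/user/<uid> path.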
+ if os.getenv('XDG_RUNTIME_DIR') is None:
+ os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
+
+ # Set CLI options depending on params
+ # if scope is 'system' (the default), no extra switch is needed;
+ # the other choices match the corresponding switch
+ if module.params['scope'] != 'system':
+ systemctl += " --%s" % module.params['scope']
+
+ if module.params['no_block']:
+ systemctl += " --no-block"
+
+ if module.params['force']:
+ systemctl += " --force"
+
+ rc = 0
+ out = err = ''
+ result = dict(
+ name=unit,
+ changed=False,
+ status=dict(),
+ )
+
+ # Run daemon-reload first, if requested
+ if module.params['daemon_reload'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
+
+ # Run daemon-reexec
+ if module.params['daemon_reexec'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
+
+ if unit:
+ found = False
+ is_initd = sysv_exists(unit)
+ is_systemd = False
+
+ # check service data, cannot error out on rc as it changes across versions, assume not found
+ (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
+
+ if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
+ # load return of systemctl show into dictionary for easy access and return
+ if out:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
+
+ is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
+
+ # Check for loading error
+ if is_systemd and not is_masked and 'LoadError' in result['status']:
+ module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
+
+ # Workaround for https://github.com/ansible/ansible/issues/71528
+ elif err and rc == 1 and 'Failed to parse bus message' in err:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
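+ # e.g. 'getty@tty1.service' -> search 'getty@*' so templated instances
+ # match their 'getty@.service' unit-file entry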
+ unit_base, sep, suffix = unit.partition('@')
+ unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
+ (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
+ is_systemd = unit_search in out
+
+ (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
+ result['status']['ActiveState'] = out.rstrip('\n')
+
+ else:
+ # list taken from man systemctl(1) for systemd 244
+ valid_enabled_states = [
+ "enabled",
+ "enabled-runtime",
+ "linked",
+ "linked-runtime",
+ "masked",
+ "masked-runtime",
+ "static",
+ "indirect",
+ "disabled",
+ "generated",
+ "transient"]
+
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ if out.strip() in valid_enabled_states:
+ is_systemd = True
+ else:
+ # fallback list-unit-files as show does not work on some systems (chroot)
+ # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
+ (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
+ if rc == 0:
+ is_systemd = True
+ else:
+ # Check for systemctl command
+ module.run_command(systemctl, check_rc=True)
+
+ # Does service exist?
+ found = is_systemd or is_initd
+ if is_initd and not is_systemd:
+ module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
+
+ # mask/unmask the service, if requested, can operate on services before they are installed
+ if module.params['masked'] is not None:
+ # state is not masked unless systemd affirms otherwise
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ masked = out.strip() == "masked"
+
+ if masked != module.params['masked']:
+ result['changed'] = True
+ if module.params['masked']:
+ action = 'mask'
+ else:
+ action = 'unmask'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
+ fail_if_missing(module, found, unit, msg='host')
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ fail_if_missing(module, found, unit, msg='host')
+
+ # do we need to enable the service?
+ enabled = False
+ (rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit))
+
+ # check systemctl result or if it is a init script
+ if rc == 0:
+ enabled = True
+ # If the output is exactly one line reading 'indirect' or 'alias', the unit is considered disabled
+ if out.splitlines() == ["indirect"] or out.splitlines() == ["alias"]:
+ enabled = False
+
+ elif rc == 1:
+ # if not a user or global scope service and both an init script and a unit file exist, stdout should say enabled/disabled; otherwise fall back to the sysv check
+ if module.params['scope'] == 'system' and \
+ is_initd and \
+ not out.strip().endswith('disabled') and \
+ sysv_is_enabled(unit):
+ enabled = True
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
+
+ result['enabled'] = not enabled
+
+ # set service state if requested
+ if module.params['state'] is not None:
+ fail_if_missing(module, found, unit, msg="host")
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # What is current service state?
+ if 'ActiveState' in result['status']:
+ action = None
+ if module.params['state'] == 'started':
+ if not is_running_service(result['status']):
+ action = 'start'
+ elif module.params['state'] == 'stopped':
+ if is_running_service(result['status']) or is_deactivating_service(result['status']):
+ action = 'stop'
+ else:
+ if not is_running_service(result['status']):
+ action = 'start'
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+ # check for chroot
+ elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
+ module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
+ else:
+ # this should not happen?
+ module.fail_json(msg="Service is in unknown state", status=result['status'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/systemd_service.py b/lib/ansible/modules/systemd_service.py
new file mode 100644
index 0000000..3580fa5
--- /dev/null
+++ b/lib/ansible/modules/systemd_service.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: systemd_service
+author:
+ - Ansible Core Team
+version_added: "2.2"
+short_description: Manage systemd units
+description:
+ - Controls systemd units (services, timers, and so on) on remote hosts.
+options:
+ name:
+ description:
+ - Name of the unit. This parameter takes the name of exactly one unit to work with.
+ - When no extension is given, a C(.service) extension is implied, as systemd does.
+ - When used in a chroot environment you always need to specify the name of the unit with the extension. For example, C(crond.service).
+ type: str
+ aliases: [ service, unit ]
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the unit. C(reloaded) will always reload.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the unit should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ force:
+ description:
+ - Whether to override existing symlinks.
+ type: bool
+ version_added: 2.6
+ masked:
+ description:
+ - Whether the unit should be masked or not. A masked unit is impossible to start.
+ type: bool
+ daemon_reload:
+ description:
+ - Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
+ - When set to C(true), runs daemon-reload even if the module does not start or stop anything.
+ type: bool
+ default: no
+ aliases: [ daemon-reload ]
+ daemon_reexec:
+ description:
+ - Run daemon-reexec before doing any other operations; the systemd manager will serialize its state, re-execute itself, and deserialize the state again.
+ type: bool
+ default: no
+ aliases: [ daemon-reexec ]
+ version_added: "2.8"
+ scope:
+ description:
+ - Run systemctl within a given service manager scope, either as the default system scope C(system),
+ the current user's scope C(user), or the scope of all users C(global).
+ - "For systemd to work with 'user', the executing user must have its own instance of dbus started and accessible (systemd requirement)."
+ - "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
+ Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
+ - The user must have access, normally given via setting the C(XDG_RUNTIME_DIR) variable, see example below.
+
+ type: str
+ choices: [ system, user, global ]
+ default: system
+ version_added: "2.7"
+ no_block:
+ description:
+ - Do not synchronously wait for the requested operation to finish.
+ Enqueued job will continue without Ansible blocking on its completion.
+ type: bool
+ default: no
+ version_added: "2.3"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - Since 2.4, one of the following options is required: C(state), C(enabled), C(masked), C(daemon_reload), (C(daemon_reexec) since 2.8),
+ and all except C(daemon_reload) and (C(daemon_reexec) since 2.8) also require C(name).
+ - Before 2.4 you always required C(name).
+ - Globs are not supported in name, for example C(postgres*.service).
+ - The service names might vary by specific OS/distribution.
+requirements:
+ - A system managed by systemd.
+'''
+
+EXAMPLES = '''
+- name: Make sure a service unit is running
+ ansible.builtin.systemd:
+ state: started
+ name: httpd
+
+- name: Stop service cron on debian, if running
+ ansible.builtin.systemd:
+ name: cron
+ state: stopped
+
+- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
+ ansible.builtin.systemd:
+ state: restarted
+ daemon_reload: true
+ name: crond
+
+- name: Reload service httpd, in all cases
+ ansible.builtin.systemd:
+ name: httpd.service
+ state: reloaded
+
+- name: Enable service httpd and ensure it is not masked
+ ansible.builtin.systemd:
+ name: httpd
+ enabled: true
+ masked: no
+
+- name: Enable a timer unit for dnf-automatic
+ ansible.builtin.systemd:
+ name: dnf-automatic.timer
+ state: started
+ enabled: true
+
+- name: Just force systemd to reread configs (2.4 and above)
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Just force systemd to re-execute itself (2.8 and above)
+ ansible.builtin.systemd:
+ daemon_reexec: true
+
+- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
+ ansible.builtin.systemd:
+ name: myservice
+ state: started
+ scope: user
+ environment:
+ XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
+'''
+
+RETURN = '''
+status:
+ description: A dictionary with the key=value pairs returned from C(systemctl show).
+ returned: success
+ type: complex
+ sample: {
+ "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ActiveEnterTimestampMonotonic": "8135942",
+ "ActiveExitTimestampMonotonic": "0",
+ "ActiveState": "active",
+ "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
+ "AllowIsolate": "no",
+ "Before": "shutdown.target multi-user.target",
+ "BlockIOAccounting": "no",
+ "BlockIOWeight": "1000",
+ "CPUAccounting": "no",
+ "CPUSchedulingPolicy": "0",
+ "CPUSchedulingPriority": "0",
+ "CPUSchedulingResetOnFork": "no",
+ "CPUShares": "1024",
+ "CanIsolate": "no",
+ "CanReload": "yes",
+ "CanStart": "yes",
+ "CanStop": "yes",
+ "CapabilityBoundingSet": "18446744073709551615",
+ "ConditionResult": "yes",
+ "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ConditionTimestampMonotonic": "7902742",
+ "Conflicts": "shutdown.target",
+ "ControlGroup": "/system.slice/crond.service",
+ "ControlPID": "0",
+ "DefaultDependencies": "yes",
+ "Delegate": "no",
+ "Description": "Command Scheduler",
+ "DevicePolicy": "auto",
+ "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
+ "ExecMainCode": "0",
+ "ExecMainExitTimestampMonotonic": "0",
+ "ExecMainPID": "595",
+ "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ExecMainStartTimestampMonotonic": "8134990",
+ "ExecMainStatus": "0",
+ "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "FragmentPath": "/usr/lib/systemd/system/crond.service",
+ "GuessMainPID": "yes",
+ "IOScheduling": "0",
+ "Id": "crond.service",
+ "IgnoreOnIsolate": "no",
+ "IgnoreOnSnapshot": "no",
+ "IgnoreSIGPIPE": "yes",
+ "InactiveEnterTimestampMonotonic": "0",
+ "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "InactiveExitTimestampMonotonic": "8135942",
+ "JobTimeoutUSec": "0",
+ "KillMode": "process",
+ "KillSignal": "15",
+ "LimitAS": "18446744073709551615",
+ "LimitCORE": "18446744073709551615",
+ "LimitCPU": "18446744073709551615",
+ "LimitDATA": "18446744073709551615",
+ "LimitFSIZE": "18446744073709551615",
+ "LimitLOCKS": "18446744073709551615",
+ "LimitMEMLOCK": "65536",
+ "LimitMSGQUEUE": "819200",
+ "LimitNICE": "0",
+ "LimitNOFILE": "4096",
+ "LimitNPROC": "3902",
+ "LimitRSS": "18446744073709551615",
+ "LimitRTPRIO": "0",
+ "LimitRTTIME": "18446744073709551615",
+ "LimitSIGPENDING": "3902",
+ "LimitSTACK": "18446744073709551615",
+ "LoadState": "loaded",
+ "MainPID": "595",
+ "MemoryAccounting": "no",
+ "MemoryLimit": "18446744073709551615",
+ "MountFlags": "0",
+ "Names": "crond.service",
+ "NeedDaemonReload": "no",
+ "Nice": "0",
+ "NoNewPrivileges": "no",
+ "NonBlocking": "no",
+ "NotifyAccess": "none",
+ "OOMScoreAdjust": "0",
+ "OnFailureIsolate": "no",
+ "PermissionsStartOnly": "no",
+ "PrivateNetwork": "no",
+ "PrivateTmp": "no",
+ "RefuseManualStart": "no",
+ "RefuseManualStop": "no",
+ "RemainAfterExit": "no",
+ "Requires": "basic.target",
+ "Restart": "no",
+ "RestartUSec": "100ms",
+ "Result": "success",
+ "RootDirectoryStartOnly": "no",
+ "SameProcessGroup": "no",
+ "SecureBits": "0",
+ "SendSIGHUP": "no",
+ "SendSIGKILL": "yes",
+ "Slice": "system.slice",
+ "StandardError": "inherit",
+ "StandardInput": "null",
+ "StandardOutput": "journal",
+ "StartLimitAction": "none",
+ "StartLimitBurst": "5",
+ "StartLimitInterval": "10000000",
+ "StatusErrno": "0",
+ "StopWhenUnneeded": "no",
+ "SubState": "running",
+ "SyslogLevelPrefix": "yes",
+ "SyslogPriority": "30",
+ "TTYReset": "no",
+ "TTYVHangup": "no",
+ "TTYVTDisallocate": "no",
+ "TimeoutStartUSec": "1min 30s",
+ "TimeoutStopUSec": "1min 30s",
+ "TimerSlackNSec": "50000",
+ "Transient": "no",
+ "Type": "simple",
+ "UMask": "0022",
+ "UnitFileState": "enabled",
+ "WantedBy": "multi-user.target",
+ "Wants": "system.slice",
+ "WatchdogTimestampMonotonic": "0",
+ "WatchdogUSec": "0",
+ }
+''' # NOQA
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.system.chroot import is_chroot
+from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def is_running_service(service_status):
+ return service_status['ActiveState'] in set(['active', 'activating'])
+
+
+def is_deactivating_service(service_status):
+ return service_status['ActiveState'] in set(['deactivating'])
+
+
+def request_was_ignored(out):
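+ # In a chroot, or with systemd offline, systemctl prints a message such as
+ # "Running in chroot, ignoring request." instead of key=value output.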
+ return '=' not in out and ('ignoring request' in out or 'ignoring command' in out)
+
+
+def parse_systemctl_show(lines):
+ # The output of 'systemctl show' can contain values that span multiple lines. At first glance it
+ # appears that such values are always surrounded by {}, so the previous version of this code
+ # assumed that any value starting with { was a multi-line value; it would then consume lines
+ # until it saw a line that ended with }. However, it is possible to have a single-line value
+ # that starts with { but does not end with } (this could happen in the value for Description=,
+ # for example), and the previous version of this code would then consume all remaining lines as
+ # part of that value. Cryptically, this would lead to Ansible reporting that the service file
+ # couldn't be found.
+ #
+ # To avoid this issue, the following code only accepts multi-line values for keys whose names
+ # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
+ # span multiple lines.
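+ #
+ # Illustrative input (assumed sample):
+ #   Id=crond.service
+ #   ExecStart={ path=/usr/sbin/crond ;
+ #   argv[]=/usr/sbin/crond -n }
+ # parses to {'Id': 'crond.service', 'ExecStart': '{ path=/usr/sbin/crond ;\nargv[]=/usr/sbin/crond -n }'}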
+ parsed = {}
+ multival = []
+ k = None
+ for line in lines:
+ if k is None:
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k.startswith('Exec') and v.lstrip().startswith('{'):
+ if not v.rstrip().endswith('}'):
+ multival.append(v)
+ continue
+ parsed[k] = v.strip()
+ k = None
+ else:
+ multival.append(line)
+ if line.rstrip().endswith('}'):
+ parsed[k] = '\n'.join(multival).strip()
+ multival = []
+ k = None
+ return parsed
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', aliases=['service', 'unit']),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ force=dict(type='bool'),
+ masked=dict(type='bool'),
+ daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
+ daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
+ scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
+ no_block=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
+ required_by=dict(
+ state=('name', ),
+ enabled=('name', ),
+ masked=('name', ),
+ ),
+ )
+
+ unit = module.params['name']
+ if unit is not None:
+ for globpattern in (r"*", r"?", r"["):
+ if globpattern in unit:
+ module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
+
+ systemctl = module.get_bin_path('systemctl', True)
+
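+ # Assumed rationale: user-scope systemctl needs XDG_RUNTIME_DIR to locate the
+ # user's D-Bus socket, so fall back to the conventional /run/user/<uid> path.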
+ if os.getenv('XDG_RUNTIME_DIR') is None:
+ os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
+
+ # Set CLI options depending on params
+ # if scope is 'system' (the default), no extra switch is needed;
+ # the other choices match the corresponding switch
+ if module.params['scope'] != 'system':
+ systemctl += " --%s" % module.params['scope']
+
+ if module.params['no_block']:
+ systemctl += " --no-block"
+
+ if module.params['force']:
+ systemctl += " --force"
+
+ rc = 0
+ out = err = ''
+ result = dict(
+ name=unit,
+ changed=False,
+ status=dict(),
+ )
+
+ # Run daemon-reload first, if requested
+ if module.params['daemon_reload'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
+
+ # Run daemon-reexec
+ if module.params['daemon_reexec'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
+
+ if unit:
+ found = False
+ is_initd = sysv_exists(unit)
+ is_systemd = False
+
+ # check service data, cannot error out on rc as it changes across versions, assume not found
+ (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
+
+ if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
+ # load return of systemctl show into dictionary for easy access and return
+ if out:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
+
+ is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
+
+ # Check for loading error
+ if is_systemd and not is_masked and 'LoadError' in result['status']:
+ module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
+
+ # Workaround for https://github.com/ansible/ansible/issues/71528
+ elif err and rc == 1 and 'Failed to parse bus message' in err:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
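+ # e.g. 'getty@tty1.service' -> search 'getty@*' so templated instances
+ # match their 'getty@.service' unit-file entry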
+ unit_base, sep, suffix = unit.partition('@')
+ unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
+ (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
+ is_systemd = unit_search in out
+
+ (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
+ result['status']['ActiveState'] = out.rstrip('\n')
+
+ else:
+ # list taken from man systemctl(1) for systemd 244
+ valid_enabled_states = [
+ "enabled",
+ "enabled-runtime",
+ "linked",
+ "linked-runtime",
+ "masked",
+ "masked-runtime",
+ "static",
+ "indirect",
+ "disabled",
+ "generated",
+ "transient"]
+
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ if out.strip() in valid_enabled_states:
+ is_systemd = True
+ else:
+ # fallback list-unit-files as show does not work on some systems (chroot)
+ # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
+ (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
+ if rc == 0:
+ is_systemd = True
+ else:
+ # Check for systemctl command
+ module.run_command(systemctl, check_rc=True)
+
+ # Does service exist?
+ found = is_systemd or is_initd
+ if is_initd and not is_systemd:
+ module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
+
+ # mask/unmask the service, if requested, can operate on services before they are installed
+ if module.params['masked'] is not None:
+ # state is not masked unless systemd affirms otherwise
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ masked = out.strip() == "masked"
+
+ if masked != module.params['masked']:
+ result['changed'] = True
+ if module.params['masked']:
+ action = 'mask'
+ else:
+ action = 'unmask'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
+ fail_if_missing(module, found, unit, msg='host')
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ fail_if_missing(module, found, unit, msg='host')
+
+ # do we need to enable the service?
+ enabled = False
+ (rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit))
+
+ # check systemctl result or if it is a init script
+ if rc == 0:
+ enabled = True
+ # If the output is exactly one line reading 'indirect' or 'alias', the unit is considered disabled
+ if out.splitlines() == ["indirect"] or out.splitlines() == ["alias"]:
+ enabled = False
+
+ elif rc == 1:
+ # if not a user or global scope service and both an init script and a unit file exist, stdout should say enabled/disabled; otherwise fall back to the sysv check
+ if module.params['scope'] == 'system' and \
+ is_initd and \
+ not out.strip().endswith('disabled') and \
+ sysv_is_enabled(unit):
+ enabled = True
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
+
+ result['enabled'] = not enabled
+
+ # set service state if requested
+ if module.params['state'] is not None:
+ fail_if_missing(module, found, unit, msg="host")
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # What is current service state?
+ if 'ActiveState' in result['status']:
+ action = None
+ if module.params['state'] == 'started':
+ if not is_running_service(result['status']):
+ action = 'start'
+ elif module.params['state'] == 'stopped':
+ if is_running_service(result['status']) or is_deactivating_service(result['status']):
+ action = 'stop'
+ else:
+ if not is_running_service(result['status']):
+ action = 'start'
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+ # check for chroot
+ elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
+ module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
+ else:
+ # this should not happen?
+ module.fail_json(msg="Service is in unknown state", status=result['status'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/sysvinit.py b/lib/ansible/modules/sysvinit.py
new file mode 100644
index 0000000..b3b9c10
--- /dev/null
+++ b/lib/ansible/modules/sysvinit.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Brian Coca <bcoca@ansible.com>
+# (c) 2017, Adam Miller <admiller@redhat.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: sysvinit
+author:
+ - "Ansible Core Team"
+version_added: "2.6"
+short_description: Manage SysV services.
+description:
+ - Controls services on target hosts that use the SysV init system.
+options:
+ name:
+ required: true
+ description:
+ - Name of the service.
+ type: str
+ aliases: ['service']
+ state:
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+        Not all init scripts support C(restarted) or C(reloaded) natively, so these will both trigger a stop and start as needed.
+ type: str
+ enabled:
+ type: bool
+ description:
+      - Whether the service should start on boot. B(At least one of I(state) and I(enabled) is required.)
+ sleep:
+ default: 1
+ description:
+      - If the service is being C(restarted) or C(reloaded) then sleep this many seconds between the stop and start commands.
+        This helps to work around badly behaving services.
+ type: int
+ pattern:
+ description:
+ - A substring to look for as would be found in the output of the I(ps) command as a stand-in for a status result.
+ - If the string is found, the service will be assumed to be running.
+ - "This option is mainly for use with init scripts that don't support the 'status' option."
+ type: str
+ runlevels:
+ description:
+ - The runlevels this script should be enabled/disabled from.
+ - Use this to override the defaults set by the package or init script itself.
+ type: list
+ elements: str
+ arguments:
+ description:
+ - Additional arguments provided on the command line that some init scripts accept.
+ type: str
+ aliases: [ 'args' ]
+ daemonize:
+ type: bool
+ description:
+ - Have the module daemonize as the service itself might not do so properly.
+      - This is useful with badly written init scripts or daemons. The problem
+        commonly manifests as the task hanging because the process is still
+        holding the tty, or as the service dying when the task is over because
+        the connection closes the session.
+ default: no
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+  - One option other than I(name) is required.
+  - Service names might vary by specific OS/distribution.
+requirements:
+ - That the service managed has a corresponding init script.
+'''
+
+EXAMPLES = '''
+- name: Make sure apache2 is started
+ ansible.builtin.sysvinit:
+ name: apache2
+ state: started
+ enabled: yes
+
+- name: Make sure apache2 is started on runlevels 3 and 5
+ ansible.builtin.sysvinit:
+ name: apache2
+ state: started
+ enabled: yes
+ runlevels:
+ - 3
+ - 5
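+
+# An illustrative example (not from the original docs): C(pattern) and
+# C(daemonize) help with init scripts that lack a working 'status' action or
+# fail to detach properly; the service name 'legacyd' is a placeholder.
+- name: Restart a badly behaving init script, matching it by process pattern
+  ansible.builtin.sysvinit:
+    name: legacyd
+    state: restarted
+    pattern: /usr/sbin/legacyd
+    daemonize: yes
+    sleep: 2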
+'''
+
+RETURN = r'''
+results:
+ description: results from actions taken
+ returned: always
+ type: complex
+ sample: {
+ "attempts": 1,
+ "changed": true,
+ "name": "apache2",
+ "status": {
+ "enabled": {
+ "changed": true,
+ "rc": 0,
+ "stderr": "",
+ "stdout": ""
+ },
+ "stopped": {
+ "changed": true,
+ "rc": 0,
+ "stderr": "",
+ "stdout": "Stopping web server: apache2.\n"
+ }
+ }
+ }
+'''
+
+import re
+from time import sleep
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import sysv_is_enabled, get_sysv_script, sysv_exists, fail_if_missing, get_ps, daemonize
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
+ enabled=dict(type='bool'),
+ sleep=dict(type='int', default=1),
+ pattern=dict(type='str'),
+ arguments=dict(type='str', aliases=['args']),
+ runlevels=dict(type='list', elements='str'),
+ daemonize=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ name = module.params['name']
+ action = module.params['state']
+ enabled = module.params['enabled']
+ runlevels = module.params['runlevels']
+ pattern = module.params['pattern']
+ sleep_for = module.params['sleep']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': name,
+ 'changed': False,
+ 'status': {}
+ }
+
+ # ensure service exists, get script name
+ fail_if_missing(module, sysv_exists(name), name)
+ script = get_sysv_script(name)
+
+ # locate binaries for service management
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['chkconfig', 'update-rc.d', 'insserv', 'service']
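+    # chkconfig and update-rc.d are used below to manage boot enablement, and
+    # 'service' is used for status queries; insserv is located but not used here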
+
+ # Keeps track of the service status for various runlevels because we can
+ # operate on multiple runlevels at once
+ runlevel_status = {}
+
+ location = {}
+ for binary in binaries:
+ location[binary] = module.get_bin_path(binary, opt_dirs=paths)
+
+ # figure out enable status
+ if runlevels:
+ for rl in runlevels:
+ runlevel_status.setdefault(rl, {})
+ runlevel_status[rl]["enabled"] = sysv_is_enabled(name, runlevel=rl)
+ else:
+ runlevel_status["enabled"] = sysv_is_enabled(name)
+
+ # figure out started status, everyone does it different!
+ is_started = False
+ worked = False
+
+ # user knows other methods fail and supplied pattern
+ if pattern:
+ worked = is_started = get_ps(module, pattern)
+ else:
+ if location.get('service'):
+            # standard tool that has been 'destandardized' by reimplementation in other OS/distros
+ cmd = '%s %s status' % (location['service'], name)
+ elif script:
+ # maybe script implements status (not LSB)
+ cmd = '%s status' % script
+ else:
+ module.fail_json(msg="Unable to determine service status")
+
+ (rc, out, err) = module.run_command(cmd)
+    if rc != -1:
+ # special case
+ if name == 'iptables' and "ACCEPT" in out:
+ worked = True
+ is_started = True
+
+ # check output messages, messy but sadly more reliable than rc
+ if not worked and out.count('\n') <= 1:
+
+ cleanout = out.lower().replace(name.lower(), '')
+
+ for stopped in ['stop', 'is dead ', 'dead but ', 'could not access pid file', 'inactive']:
+ if stopped in cleanout:
+ worked = True
+ break
+
+ if not worked:
+ for started_status in ['run', 'start', 'active']:
+ if started_status in cleanout and "not " not in cleanout:
+ is_started = True
+ worked = True
+ break
+
+            # hope rc is not lying to us; check commonly seen 'bad' return codes
+ if not worked and rc in [1, 2, 3, 4, 69]:
+ worked = True
+
+ if not worked:
+ # hail mary
+ if rc == 0:
+ is_started = True
+ worked = True
+        # try ps as a last resort; it can only confirm a positive match
+ elif get_ps(module, name):
+ is_started = True
+ worked = True
+ module.warn("Used ps output to match service name and determine it is up, this is very unreliable")
+
+ if not worked:
+ module.warn("Unable to determine if service is up, assuming it is down")
+
+ ###########################################################################
+ # BEGIN: Enable/Disable
+ result['status'].setdefault('enabled', {})
+ result['status']['enabled']['changed'] = False
+ result['status']['enabled']['rc'] = None
+ result['status']['enabled']['stdout'] = None
+ result['status']['enabled']['stderr'] = None
+ if runlevels:
+ result['status']['enabled']['runlevels'] = runlevels
+ for rl in runlevels:
+ if enabled != runlevel_status[rl]["enabled"]:
+ result['changed'] = True
+ result['status']['enabled']['changed'] = True
+
+ if not module.check_mode and result['changed']:
+ # Perform enable/disable here
+ if enabled:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s enable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s --level %s %s on" % (location['chkconfig'], ''.join(runlevels), name))
+ else:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s disable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s --level %s %s off" % (location['chkconfig'], ''.join(runlevels), name))
+ else:
+ if enabled is not None and enabled != runlevel_status["enabled"]:
+ result['changed'] = True
+ result['status']['enabled']['changed'] = True
+
+ if not module.check_mode and result['changed']:
+ # Perform enable/disable here
+ if enabled:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s defaults" % (location['update-rc.d'], name))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s %s on" % (location['chkconfig'], name))
+ else:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s disable" % (location['update-rc.d'], name))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s %s off" % (location['chkconfig'], name))
+
+    # Assigned above; might be useful if something goes sideways
+ if not module.check_mode and result['status']['enabled']['changed']:
+ result['status']['enabled']['rc'] = rc
+ result['status']['enabled']['stdout'] = out
+ result['status']['enabled']['stderr'] = err
+ rc, out, err = None, None, None
+
+ if "illegal runlevel specified" in result['status']['enabled']['stderr']:
+ module.fail_json(msg="Illegal runlevel specified for enable operation on service %s" % name, **result)
+ # END: Enable/Disable
+ ###########################################################################
+
+ ###########################################################################
+ # BEGIN: state
+ result['status'].setdefault(module.params['state'], {})
+ result['status'][module.params['state']]['changed'] = False
+ result['status'][module.params['state']]['rc'] = None
+ result['status'][module.params['state']]['stdout'] = None
+ result['status'][module.params['state']]['stderr'] = None
+ if action:
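+        # normalize the requested state to the init-script verb:
+        # started -> start, stopped -> stop, restarted -> restart, reloaded -> reload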
+ action = re.sub(r'p?ed$', '', action.lower())
+
+ def runme(doit):
+
+ args = module.params['arguments']
+ cmd = "%s %s %s" % (script, doit, "" if args is None else args)
+
+ # how to run
+ if module.params['daemonize']:
+ (rc, out, err) = daemonize(module, cmd)
+ else:
+ (rc, out, err) = module.run_command(cmd)
+ # FIXME: ERRORS
+
+ if rc != 0:
+ module.fail_json(msg="Failed to %s service: %s" % (action, name), rc=rc, stdout=out, stderr=err)
+
+ return (rc, out, err)
+
+ if action == 'restart':
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+
+ # cannot rely on existing 'restart' in init script
+ for dothis in ['stop', 'start']:
+ (rc, out, err) = runme(dothis)
+ if sleep_for:
+ sleep(sleep_for)
+
+ elif is_started != (action == 'start'):
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+ rc, out, err = runme(action)
+
+ elif is_started == (action == 'stop'):
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+ rc, out, err = runme(action)
+
+ if not module.check_mode and result['status'][module.params['state']]['changed']:
+ result['status'][module.params['state']]['rc'] = rc
+ result['status'][module.params['state']]['stdout'] = out
+ result['status'][module.params['state']]['stderr'] = err
+ rc, out, err = None, None, None
+ # END: state
+ ###########################################################################
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/tempfile.py b/lib/ansible/modules/tempfile.py
new file mode 100644
index 0000000..10594de
--- /dev/null
+++ b/lib/ansible/modules/tempfile.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: tempfile
+version_added: "2.3"
+short_description: Creates temporary files and directories
+description:
+  - The C(tempfile) module creates temporary files and directories. The C(mktemp) command takes different parameters on various systems,
+    and this module helps to avoid the troubles related to that. Files and directories created by the module are accessible only to their creator.
+    If you need to make them world-accessible, use the M(ansible.builtin.file) module.
+ - For Windows targets, use the M(ansible.windows.win_tempfile) module instead.
+options:
+ state:
+ description:
+ - Whether to create file or directory.
+ type: str
+ choices: [ directory, file ]
+ default: file
+ path:
+ description:
+ - Location where temporary file or directory should be created.
+ - If path is not specified, the default system temporary directory will be used.
+ type: path
+ prefix:
+ description:
+ - Prefix of file/directory name created by module.
+ type: str
+ default: ansible.
+ suffix:
+ description:
+ - Suffix of file/directory name created by module.
+ type: str
+ default: ""
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.builtin.file
+- module: ansible.windows.win_tempfile
+author:
+ - Krzysztof Magosa (@krzysztof-magosa)
+'''
+
+EXAMPLES = """
+- name: Create temporary build directory
+ ansible.builtin.tempfile:
+ state: directory
+ suffix: build
+
+- name: Create temporary file
+ ansible.builtin.tempfile:
+ state: file
+ suffix: temp
+ register: tempfile_1
+
+- name: Use the registered var and the file module to remove the temporary file
+ ansible.builtin.file:
+ path: "{{ tempfile_1.path }}"
+ state: absent
+ when: tempfile_1.path is defined
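+
+# An illustrative example (not from the original docs): the directory and
+# prefix below are arbitrary placeholders, not module defaults.
+- name: Create a temporary file under /var/tmp with a custom prefix
+  ansible.builtin.tempfile:
+    state: file
+    path: /var/tmp
+    prefix: myapp.
+  register: tempfile_2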
+"""
+
+RETURN = '''
+path:
+ description: Path to created file or directory.
+ returned: success
+ type: str
+ sample: "/tmp/ansible.bMlvdk"
+'''
+
+from os import close
+from tempfile import mkstemp, mkdtemp
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='file', choices=['file', 'directory']),
+ path=dict(type='path'),
+ prefix=dict(type='str', default='ansible.'),
+ suffix=dict(type='str', default=''),
+ ),
+ )
+
+ try:
+ if module.params['state'] == 'file':
+ handle, path = mkstemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path'],
+ )
+ close(handle)
+ else:
+ path = mkdtemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path'],
+ )
+
+ module.exit_json(changed=True, path=path)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/template.py b/lib/ansible/modules/template.py
new file mode 100644
index 0000000..7ee581a
--- /dev/null
+++ b/lib/ansible/modules/template.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: template
+version_added: historical
+short_description: Template a file out to a target host
+options:
+ follow:
+ description:
+ - Determine whether symbolic links should be followed.
+ - When set to C(true) symbolic links will be followed, if they exist.
+ - When set to C(false) symbolic links will not be followed.
+    - Prior to Ansible 2.4, this was hardcoded as C(true).
+ type: bool
+ default: no
+ version_added: '2.4'
+notes:
+- For Windows you can use M(ansible.windows.win_template) which uses C(\r\n) as C(newline_sequence) by default.
+- The C(jinja2_native) setting has no effect. Native types are never used in the C(template) module which is by design used for generating text files.
+ For working with templates and utilizing Jinja2 native types see the C(jinja2_native) parameter of the C(template lookup).
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.windows.win_copy
+- module: ansible.windows.win_template
+author:
+- Ansible Core Team
+- Michael DeHaan
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+- action_common_attributes.files
+- backup
+- files
+- template_common
+- validate
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: full
+'''
+
+EXAMPLES = r'''
+- name: Template a file to /etc/file.conf
+ ansible.builtin.template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: '0644'
+
+- name: Template a file, using symbolic modes (equivalent to 0644)
+ ansible.builtin.template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: u=rw,g=r,o=r
+
+- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
+ ansible.builtin.template:
+ src: named.conf_{{ ansible_os_family }}.j2
+ dest: /etc/named.conf
+ group: named
+ setype: named_conf_t
+ mode: 0640
+
+- name: Create a DOS-style text file from a template
+ ansible.builtin.template:
+ src: config.ini.j2
+ dest: /share/windows/config.ini
+ newline_sequence: '\r\n'
+
+- name: Copy a new sudoers file into place, after passing validation with visudo
+ ansible.builtin.template:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: /usr/sbin/visudo -cf %s
+
+- name: Update sshd configuration safely, avoid locking yourself out
+ ansible.builtin.template:
+ src: etc/ssh/sshd_config.j2
+ dest: /etc/ssh/sshd_config
+ owner: root
+ group: root
+ mode: '0600'
+ validate: /usr/sbin/sshd -t -f %s
+ backup: yes
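+
+# An illustrative example (not from the original docs) of the C(follow)
+# option described above; the paths are placeholders.
+- name: Template a file, writing through an existing symlink at the destination
+  ansible.builtin.template:
+    src: /mytemplates/foo.j2
+    dest: /etc/file.conf
+    follow: yes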
+'''
diff --git a/lib/ansible/modules/unarchive.py b/lib/ansible/modules/unarchive.py
new file mode 100644
index 0000000..26890b5
--- /dev/null
+++ b/lib/ansible/modules/unarchive.py
@@ -0,0 +1,1115 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
+# Copyright: (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2016, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: unarchive
+version_added: '1.4'
+short_description: Unpacks an archive after (optionally) copying it from the local machine
+description:
+ - The C(unarchive) module unpacks an archive. It will not unpack a compressed file that does not contain an archive.
+ - By default, it will copy the source file from the local system to the target before unpacking.
+ - Set C(remote_src=yes) to unpack an archive which already exists on the target.
+ - If checksum validation is desired, use M(ansible.builtin.get_url) or M(ansible.builtin.uri) instead to fetch the file and set C(remote_src=yes).
+ - For Windows targets, use the M(community.windows.win_unzip) module instead.
+options:
+ src:
+ description:
+ - If C(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If C(remote_src=yes), path on the
+ target server to existing archive file to unpack.
+ - If C(remote_src=yes) and C(src) contains C(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
+ simple cases, for full download support use the M(ansible.builtin.get_url) module.
+ type: path
+ required: true
+ dest:
+ description:
+ - Remote absolute path where the archive should be unpacked.
+ - The given path must exist. Base directory is not created by this module.
+ type: path
+ required: true
+ copy:
+ description:
+      - If C(true), the file is copied from the local controller to the managed (remote) node; otherwise, the plugin will look for the src archive on the managed machine.
+ - This option has been deprecated in favor of C(remote_src).
+ - This option is mutually exclusive with C(remote_src).
+ type: bool
+ default: yes
+ creates:
+ description:
+ - If the specified absolute path (file or directory) already exists, this step will B(not) be run.
+ - The specified absolute path (file or directory) must be below the base path given with C(dest:).
+ type: path
+ version_added: "1.6"
+ io_buffer_size:
+ description:
+      - Size, in bytes, of the volatile memory buffer used for extracting files from the archive.
+ type: int
+ default: 65536
+ version_added: "2.12"
+ list_files:
+ description:
+ - If set to True, return the list of files that are contained in the tarball.
+ type: bool
+ default: no
+ version_added: "2.0"
+ exclude:
+ description:
+ - List the directory and file entries that you would like to exclude from the unarchive action.
+ - Mutually exclusive with C(include).
+ type: list
+ default: []
+ elements: str
+ version_added: "2.1"
+ include:
+ description:
+ - List of directory and file entries that you would like to extract from the archive. If C(include)
+ is not empty, only files listed here will be extracted.
+ - Mutually exclusive with C(exclude).
+ type: list
+ default: []
+ elements: str
+ version_added: "2.11"
+ keep_newer:
+ description:
+ - Do not replace existing files that are newer than files from the archive.
+ type: bool
+ default: no
+ version_added: "2.1"
+ extra_opts:
+ description:
+ - Specify additional options by passing in an array.
+ - Each space-separated command-line option should be a new element of the array. See examples.
+ - Command-line options with multiple elements must use multiple lines in the array, one for each element.
+ type: list
+ elements: str
+    default: []
+ version_added: "2.1"
+ remote_src:
+ description:
+ - Set to C(true) to indicate the archived file is already on the remote system and not local to the Ansible controller.
+ - This option is mutually exclusive with C(copy).
+ type: bool
+ default: no
+ version_added: "2.2"
+ validate_certs:
+ description:
+ - This only applies if using a https URL as the source of the file.
+      - This should only be set to C(false) when used on personally controlled sites using a self-signed certificate.
+ - Prior to 2.2 the code worked as if this was set to C(true).
+ type: bool
+ default: yes
+ version_added: "2.2"
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+- action_common_attributes.files
+- decrypt
+- files
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: partial
+ details: Not supported for gzipped tar files.
+ diff_mode:
+ support: partial
+ details: Uses gtar's C(--diff) arg to calculate if changed or not. If this C(arg) is not supported, it will always unpack the archive.
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: none
+ vault:
+ support: full
+todo:
+ - Re-implement tar support using native tarfile module.
+ - Re-implement zip support using native zipfile module.
+notes:
+ - Requires C(zipinfo) and C(gtar)/C(unzip) command on target host.
+ - Requires C(zstd) command on target host to expand I(.tar.zst) files.
+ - Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2), I(.tar.xz), and I(.tar.zst) files using C(gtar).
+ - Does not handle I(.gz) files, I(.bz2) files, I(.xz), or I(.zst) files that do not contain a I(.tar) archive.
+ - Existing files/directories in the destination which are not in the archive
+ are not touched. This is the same behavior as a normal archive extraction.
+ - Existing files/directories in the destination which are not in the archive
+ are ignored for purposes of deciding if the archive should be unpacked or not.
+seealso:
+- module: community.general.archive
+- module: community.general.iso_extract
+- module: community.windows.win_unzip
+author: Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Extract foo.tgz into /var/lib/foo
+ ansible.builtin.unarchive:
+ src: foo.tgz
+ dest: /var/lib/foo
+
+- name: Unarchive a file that is already on the remote machine
+ ansible.builtin.unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ remote_src: yes
+
+- name: Unarchive a file that needs to be downloaded (added in 2.0)
+ ansible.builtin.unarchive:
+ src: https://example.com/example.zip
+ dest: /usr/local/bin
+ remote_src: yes
+
+- name: Unarchive a file with extra options
+ ansible.builtin.unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ extra_opts:
+ - --transform
+ - s/^xxx/yyy/
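+
+# An illustrative example (not from the original docs): extract a subset of an
+# archive and skip the task entirely once the marker path exists. The archive
+# and paths are placeholders.
+- name: Extract only selected entries, skipping the step if already done
+  ansible.builtin.unarchive:
+    src: /tmp/foo.tar.gz
+    dest: /var/lib/foo
+    remote_src: yes
+    include:
+      - etc/foo.conf
+    creates: /var/lib/foo/etc/foo.conf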
+'''
+
+RETURN = r'''
+dest:
+ description: Path to the destination directory.
+ returned: always
+ type: str
+ sample: /opt/software
+files:
+ description: List of all the files in the archive.
+ returned: When I(list_files) is True
+ type: list
+ sample: '["file1", "file2"]'
+gid:
+ description: Numerical ID of the group that owns the destination directory.
+ returned: always
+ type: int
+ sample: 1000
+group:
+ description: Name of the group that owns the destination directory.
+ returned: always
+ type: str
+ sample: "librarians"
+handler:
+ description: Archive software handler used to extract and decompress the archive.
+ returned: always
+ type: str
+ sample: "TgzArchive"
+mode:
+ description: String that represents the octal permissions of the destination directory.
+ returned: always
+ type: str
+ sample: "0755"
+owner:
+ description: Name of the user that owns the destination directory.
+ returned: always
+ type: str
+ sample: "paul"
+size:
+ description: The size of destination directory in bytes. Does not include the size of files or subdirectories contained within.
+ returned: always
+ type: int
+ sample: 36
+src:
+ description:
+ - The source archive's path.
+    - If I(src) was a remote web URL, or a file copied from the local Ansible controller, this shows the temporary location where it was stored.
+ returned: always
+ type: str
+ sample: "/home/paul/test.tar.gz"
+state:
+ description: State of the destination. Effectively always "directory".
+ returned: always
+ type: str
+ sample: "directory"
+uid:
+ description: Numerical ID of the user that owns the destination directory.
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+import binascii
+import codecs
+import datetime
+import fnmatch
+import grp
+import os
+import platform
+import pwd
+import re
+import stat
+import time
+import traceback
+from functools import partial
+from zipfile import ZipFile, BadZipfile
+
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.urls import fetch_file
+
+try: # python 3.3+
+ from shlex import quote # type: ignore[attr-defined]
+except ImportError: # older python
+ from pipes import quote
+
+# String from tar that shows the tar contents are different from the
+# filesystem
+OWNER_DIFF_RE = re.compile(r': Uid differs$')
+GROUP_DIFF_RE = re.compile(r': Gid differs$')
+MODE_DIFF_RE = re.compile(r': Mode differs$')
+MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
+# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
+EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
+MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
+ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
+INVALID_OWNER_RE = re.compile(r': Invalid owner')
+INVALID_GROUP_RE = re.compile(r': Invalid group')
+
+
+def crc32(path, buffer_size):
+ ''' Return a CRC32 checksum of a file '''
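+    # Read the file in buffer_size chunks so large archive members do not have
+    # to fit in memory; the final mask keeps the checksum unsigned.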
+
+ crc = binascii.crc32(b'')
+ with open(path, 'rb') as f:
+ for b_block in iter(partial(f.read, buffer_size), b''):
+ crc = binascii.crc32(b_block, crc)
+ return crc & 0xffffffff
+
+
+def shell_escape(string):
+ ''' Quote meta-characters in the args for the unix shell '''
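+    # For example, 'a b.txt' becomes 'a\ b\.txt'.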
+ return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
+
+
+class UnarchiveError(Exception):
+ pass
+
+
+class ZipArchive(object):
+
+ def __init__(self, src, b_dest, file_args, module):
+ self.src = src
+ self.b_dest = b_dest
+ self.file_args = file_args
+ self.opts = module.params['extra_opts']
+ self.module = module
+ self.io_buffer_size = module.params["io_buffer_size"]
+ self.excludes = module.params['exclude']
+ self.includes = []
+ self.include_files = self.module.params['include']
+ self.cmd_path = None
+ self.zipinfo_cmd_path = None
+ self._files_in_archive = []
+ self._infodict = dict()
+ self.zipinfoflag = ''
+ self.binaries = (
+ ('unzip', 'cmd_path'),
+ ('zipinfo', 'zipinfo_cmd_path'),
+ )
+
+ def _permstr_to_octal(self, modestr, umask):
+ ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
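+        # For example, 'rw-r--r--' yields 0o644; with a umask of 0o022 the
+        # result stays 0o644, while a umask of 0o077 reduces it to 0o600.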
+ revstr = modestr[::-1]
+ mode = 0
+ for j in range(0, 3):
+ for i in range(0, 3):
+ if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
+ mode += 2 ** (i + 3 * j)
+ # The unzip utility does not support setting the stST bits
+# if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
+# mode += 2 ** (9 + j)
+ return (mode & ~umask)
+
+ def _legacy_file_list(self):
+ rc, out, err = self.module.run_command([self.cmd_path, '-v', self.src])
+ if rc:
+ raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
+
+ for line in out.splitlines()[3:-2]:
+ fields = line.split(None, 7)
+ self._files_in_archive.append(fields[7])
+ self._infodict[fields[7]] = int(fields[6])
+
+ def _crc32(self, path):
+ if self._infodict:
+ return self._infodict[path]
+
+ try:
+ archive = ZipFile(self.src)
+ except BadZipfile as e:
+ if e.args[0].lower().startswith('bad magic number'):
+ # Python2.4 can't handle zipfiles with > 64K files. Try using
+ # /usr/bin/unzip instead
+ self._legacy_file_list()
+ else:
+ raise
+ else:
+ try:
+ for item in archive.infolist():
+ self._infodict[item.filename] = int(item.CRC)
+ except Exception:
+ archive.close()
+ raise UnarchiveError('Unable to list files in the archive')
+
+ return self._infodict[path]
+
+ @property
+ def files_in_archive(self):
+ if self._files_in_archive:
+ return self._files_in_archive
+
+ self._files_in_archive = []
+ try:
+ archive = ZipFile(self.src)
+ except BadZipfile as e:
+ if e.args[0].lower().startswith('bad magic number'):
+ # Python2.4 can't handle zipfiles with > 64K files. Try using
+ # /usr/bin/unzip instead
+ self._legacy_file_list()
+ else:
+ raise
+ else:
+ try:
+ for member in archive.namelist():
+ if self.include_files:
+ for include in self.include_files:
+ if fnmatch.fnmatch(member, include):
+ self._files_in_archive.append(to_native(member))
+ else:
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(member, exclude):
+ exclude_flag = True
+ break
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(member))
+ except Exception as e:
+ archive.close()
+ raise UnarchiveError('Unable to list files in the archive: %s' % to_native(e))
+
+ archive.close()
+ return self._files_in_archive
+
+ def is_unarchived(self):
+ # BSD unzip doesn't support zipinfo listings with timestamp.
+ if self.zipinfoflag:
+ cmd = [self.zipinfo_cmd_path, self.zipinfoflag, '-T', '-s', self.src]
+ else:
+ cmd = [self.zipinfo_cmd_path, '-T', '-s', self.src]
+
+ if self.excludes:
+ cmd.extend(['-x', ] + self.excludes)
+ if self.include_files:
+ cmd.extend(self.include_files)
+ rc, out, err = self.module.run_command(cmd)
+
+ old_out = out
+ diff = ''
+ out = ''
+ if rc == 0:
+ unarchived = True
+ else:
+ unarchived = False
+
+ # Get some information related to user/group ownership
+ umask = os.umask(0)
+ os.umask(umask)
+ systemtype = platform.system()
+
+ # Get current user and group information
+ groups = os.getgroups()
+ run_uid = os.getuid()
+ run_gid = os.getgid()
+ try:
+ run_owner = pwd.getpwuid(run_uid).pw_name
+ except (TypeError, KeyError):
+ run_owner = run_uid
+ try:
+ run_group = grp.getgrgid(run_gid).gr_name
+ except (KeyError, ValueError, OverflowError):
+ run_group = run_gid
+
+ # Get future user ownership
+ fut_owner = fut_uid = None
+ if self.file_args['owner']:
+ try:
+ tpw = pwd.getpwnam(self.file_args['owner'])
+ except KeyError:
+ try:
+ tpw = pwd.getpwuid(int(self.file_args['owner']))
+ except (TypeError, KeyError, ValueError):
+ tpw = pwd.getpwuid(run_uid)
+ fut_owner = tpw.pw_name
+ fut_uid = tpw.pw_uid
+ else:
+ try:
+ fut_owner = run_owner
+ except Exception:
+ pass
+ fut_uid = run_uid
+
+ # Get future group ownership
+ fut_group = fut_gid = None
+ if self.file_args['group']:
+ try:
+ tgr = grp.getgrnam(self.file_args['group'])
+ except (ValueError, KeyError):
+ try:
+ # no need to check isdigit() explicitly here, if we fail to
+ # parse, the ValueError will be caught.
+ tgr = grp.getgrgid(int(self.file_args['group']))
+ except (KeyError, ValueError, OverflowError):
+ tgr = grp.getgrgid(run_gid)
+ fut_group = tgr.gr_name
+ fut_gid = tgr.gr_gid
+ else:
+ try:
+ fut_group = run_group
+ except Exception:
+ pass
+ fut_gid = run_gid
+
+ for line in old_out.splitlines():
+ change = False
+
+ pcs = line.split(None, 7)
+ if len(pcs) != 8:
+ # Too few fields... probably a piece of the header or footer
+ continue
+
+ # Check first and seventh field in order to skip header/footer
+ if len(pcs[0]) != 7 and len(pcs[0]) != 10:
+ continue
+ if len(pcs[6]) != 15:
+ continue
+
+ # Possible entries:
+ # -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
+ # -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
+ # -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
+ # --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
+ if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
+ continue
+
+ ztype = pcs[0][0]
+ permstr = pcs[0][1:]
+ version = pcs[1]
+ ostype = pcs[2]
+ size = int(pcs[3])
+ path = to_text(pcs[7], errors='surrogate_or_strict')
+
+ # Skip excluded files
+ if path in self.excludes:
+ out += 'Path %s is excluded on request\n' % path
+ continue
+
+ # Itemized change requires L for symlink
+ if path[-1] == '/':
+ if ztype != 'd':
+ err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
+ ftype = 'd'
+ elif ztype == 'l':
+ ftype = 'L'
+ elif ztype == '-':
+ ftype = 'f'
+ elif ztype == '?':
+ ftype = 'f'
+
+ # Some files may be storing FAT permissions, not Unix permissions
+ # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666.
+ # This permission will then be modified by the system UMask.
+ # BSD always applies the Umask, even to Unix permissions.
+ # For Unix style permissions on Linux or Mac, we want to use them directly.
+ # So we set the UMask for this file to zero. That permission set will then be unchanged when calling _permstr_to_octal
+
+ if len(permstr) == 6:
+ if path[-1] == '/':
+ permstr = 'rwxrwxrwx'
+ elif permstr == 'rwx---':
+ permstr = 'rwxrwxrwx'
+ else:
+ permstr = 'rw-rw-rw-'
+ file_umask = umask
+ elif 'bsd' in systemtype.lower():
+ file_umask = umask
+ else:
+ file_umask = 0
+
+ # Test string conformity
+ if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
+ raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
+
+ # DEBUG
+# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
+
+ b_dest = os.path.join(self.b_dest, to_bytes(path, errors='surrogate_or_strict'))
+ try:
+ st = os.lstat(b_dest)
+ except Exception:
+ change = True
+ self.includes.append(path)
+ err += 'Path %s is missing\n' % path
+ diff += '>%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ # Compare file types
+ if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
+ change = True
+ self.includes.append(path)
+ err += 'File %s already exists, but not as a directory\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ if ftype == 'f' and not stat.S_ISREG(st.st_mode):
+ change = True
+ unarchived = False
+ self.includes.append(path)
+                err += 'Path %s already exists, but not as a regular file\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
+ change = True
+ self.includes.append(path)
+                err += 'Path %s already exists, but not as a symlink\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ itemized = list('.%s.......??' % ftype)
+
+ # Note: this timestamp calculation has a rounding error
+ # somewhere... unzip and this timestamp can be one second off
+ # When that happens, we report a change and re-unzip the file
+ dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
+ timestamp = time.mktime(dt_object.timetuple())
+
+ # Compare file timestamps
+ if stat.S_ISREG(st.st_mode):
+ if self.module.params['keep_newer']:
+ if timestamp > st.st_mtime:
+ change = True
+ self.includes.append(path)
+ err += 'File %s is older, replacing file\n' % path
+ itemized[4] = 't'
+ elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
+ # Add to excluded files, ignore other changes
+ out += 'File %s is newer, excluding file\n' % path
+ self.excludes.append(path)
+ continue
+ else:
+ if timestamp != st.st_mtime:
+ change = True
+ self.includes.append(path)
+ err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
+ itemized[4] = 't'
+
+ # Compare file sizes
+ if stat.S_ISREG(st.st_mode) and size != st.st_size:
+ change = True
+ err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
+ itemized[3] = 's'
+
+ # Compare file checksums
+ if stat.S_ISREG(st.st_mode):
+ crc = crc32(b_dest, self.io_buffer_size)
+ if crc != self._crc32(path):
+ change = True
+ err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
+ itemized[2] = 'c'
+
+ # Compare file permissions
+
+ # Do not handle permissions of symlinks
+ if ftype != 'L':
+
+ # Use the new mode provided with the action, if there is one
+ if self.file_args['mode']:
+ if isinstance(self.file_args['mode'], int):
+ mode = self.file_args['mode']
+ else:
+ try:
+ mode = int(self.file_args['mode'], 8)
+ except Exception as e:
+ try:
+ mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
+ except ValueError as e:
+ self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
+ # Only special files require no umask-handling
+ elif ztype == '?':
+ mode = self._permstr_to_octal(permstr, 0)
+ else:
+ mode = self._permstr_to_octal(permstr, file_umask)
+
+ if mode != stat.S_IMODE(st.st_mode):
+ change = True
+ itemized[5] = 'p'
+ err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
+
+ # Compare file user ownership
+ owner = uid = None
+ try:
+ owner = pwd.getpwuid(st.st_uid).pw_name
+ except (TypeError, KeyError):
+ uid = st.st_uid
+
+ # If we are not root and requested owner is not our user, fail
+ if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
+ raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
+
+ if owner and owner != fut_owner:
+ change = True
+ err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
+ itemized[6] = 'o'
+ elif uid and uid != fut_uid:
+ change = True
+ err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
+ itemized[6] = 'o'
+
+ # Compare file group ownership
+ group = gid = None
+ try:
+ group = grp.getgrgid(st.st_gid).gr_name
+ except (KeyError, ValueError, OverflowError):
+ gid = st.st_gid
+
+ if run_uid != 0 and (fut_group != run_group or fut_gid != run_gid) and fut_gid not in groups:
+ raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
+
+ if group and group != fut_group:
+ change = True
+ err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
+ itemized[6] = 'g'
+ elif gid and gid != fut_gid:
+ change = True
+ err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
+ itemized[6] = 'g'
+
+ # Register changed files and finalize diff output
+ if change:
+ if path not in self.includes:
+ self.includes.append(path)
+ diff += '%s %s\n' % (''.join(itemized), path)
+
+ if self.includes:
+ unarchived = False
+
+ # DEBUG
+# out = old_out + out
+
+ return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
+
+ def unarchive(self):
+ cmd = [self.cmd_path, '-o']
+ if self.opts:
+ cmd.extend(self.opts)
+ cmd.append(self.src)
+ # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
+ # if self.includes:
+ # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
+ # cmd.extend(map(shell_escape, self.includes))
+ if self.excludes:
+ cmd.extend(['-x'] + self.excludes)
+ if self.include_files:
+ cmd.extend(self.include_files)
+ cmd.extend(['-d', self.b_dest])
+ rc, out, err = self.module.run_command(cmd)
+ return dict(cmd=cmd, rc=rc, out=out, err=err)
+
+ def can_handle_archive(self):
+ missing = []
+ for b in self.binaries:
+ try:
+ setattr(self, b[1], get_bin_path(b[0]))
+ except ValueError:
+ missing.append(b[0])
+
+ if missing:
+ return False, "Unable to find required '{missing}' binary in the path.".format(missing="' or '".join(missing))
+
+ cmd = [self.cmd_path, '-l', self.src]
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ return True, None
+ return False, 'Command "%s" could not handle archive: %s' % (self.cmd_path, err)
+
+
+class TgzArchive(object):
+
+ def __init__(self, src, b_dest, file_args, module):
+ self.src = src
+ self.b_dest = b_dest
+ self.file_args = file_args
+ self.opts = module.params['extra_opts']
+ self.module = module
+ if self.module.check_mode:
+ self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
+ self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
+ self.include_files = self.module.params['include']
+ self.cmd_path = None
+ self.tar_type = None
+ self.zipflag = '-z'
+ self._files_in_archive = []
+
+ def _get_tar_type(self):
+ cmd = [self.cmd_path, '--version']
+ (rc, out, err) = self.module.run_command(cmd)
+ tar_type = None
+ if out.startswith('bsdtar'):
+ tar_type = 'bsd'
+ elif out.startswith('tar') and 'GNU' in out:
+ tar_type = 'gnu'
+ return tar_type
+
+ @property
+ def files_in_archive(self):
+ if self._files_in_archive:
+ return self._files_in_archive
+
+ cmd = [self.cmd_path, '--list', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
+
+ locale = get_best_parsable_locale(self.module)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
+ if rc != 0:
+ raise UnarchiveError('Unable to list files in the archive: %s' % err)
+
+ for filename in out.splitlines():
+ # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
+ # filename = filename.decode('string_escape')
+ filename = to_native(codecs.escape_decode(filename)[0])
+
+ # We don't allow absolute filenames. If the user wants to unarchive rooted in "/"
+ # they need to use "dest: '/'". This follows the defaults for gtar, pax, etc.
+ # Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
+ if filename.startswith('/'):
+ filename = filename[1:]
+
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(filename, exclude):
+ exclude_flag = True
+ break
+
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(filename))
+
+ return self._files_in_archive
+
+ def is_unarchived(self):
+ cmd = [self.cmd_path, '--diff', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.file_args['owner']:
+ cmd.append('--owner=' + quote(self.file_args['owner']))
+ if self.file_args['group']:
+ cmd.append('--group=' + quote(self.file_args['group']))
+ if self.module.params['keep_newer']:
+ cmd.append('--keep-newer-files')
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
+ locale = get_best_parsable_locale(self.module)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
+
+ # Check whether the differences are in something that we're
+ # setting anyway
+
+ # What is different
+ unarchived = True
+ old_out = out
+ out = ''
+ run_uid = os.getuid()
+ # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
+ # Only way to be sure is to check request with what is on disk (as we do for zip)
+ # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
+ for line in old_out.splitlines() + err.splitlines():
+ # FIXME: Remove the bogus lines from error-output as well !
+ # Ignore bogus errors on empty filenames (when using --split-component)
+ if EMPTY_FILE_RE.search(line):
+ continue
+ if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
+ out += line + '\n'
+ if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
+ out += line + '\n'
+ if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
+ out += line + '\n'
+ if MOD_TIME_DIFF_RE.search(line):
+ out += line + '\n'
+ if MISSING_FILE_RE.search(line):
+ out += line + '\n'
+ if INVALID_OWNER_RE.search(line):
+ out += line + '\n'
+ if INVALID_GROUP_RE.search(line):
+ out += line + '\n'
+ if out:
+ unarchived = False
+ return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
+
+ def unarchive(self):
+ cmd = [self.cmd_path, '--extract', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.file_args['owner']:
+ cmd.append('--owner=' + quote(self.file_args['owner']))
+ if self.file_args['group']:
+ cmd.append('--group=' + quote(self.file_args['group']))
+ if self.module.params['keep_newer']:
+ cmd.append('--keep-newer-files')
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
+ locale = get_best_parsable_locale(self.module)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
+ return dict(cmd=cmd, rc=rc, out=out, err=err)
+
+ def can_handle_archive(self):
+ # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
+ try:
+ self.cmd_path = get_bin_path('gtar')
+ except ValueError:
+ # Fallback to tar
+ try:
+ self.cmd_path = get_bin_path('tar')
+ except ValueError:
+ return False, "Unable to find required 'gtar' or 'tar' binary in the path"
+
+ self.tar_type = self._get_tar_type()
+
+ if self.tar_type != 'gnu':
+ return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
+
+ try:
+ if self.files_in_archive:
+ return True, None
+ except UnarchiveError as e:
+ return False, 'Command "%s" could not handle archive: %s' % (self.cmd_path, to_native(e))
+        # If there were errors, or no files in the archive, assume we were not
+        # able to properly unarchive it
+ return False, 'Command "%s" found no files in archive. Empty archive files are not supported.' % self.cmd_path
+
+
+# Class to handle tar files that aren't compressed
+class TarArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarArchive, self).__init__(src, b_dest, file_args, module)
+ # argument to tar
+ self.zipflag = ''
+
+
+# Class to handle bzip2 compressed tar files
+class TarBzipArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarBzipArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipflag = '-j'
+
+
+# Class to handle xz compressed tar files
+class TarXzArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipflag = '-J'
+
+
+# Class to handle zstd compressed tar files
+class TarZstdArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarZstdArchive, self).__init__(src, b_dest, file_args, module)
+ # GNU Tar supports the --use-compress-program option to
+ # specify which executable to use for
+ # compression/decompression.
+ #
+ # Note: some flavors of BSD tar support --zstd (e.g., FreeBSD
+ # 12.2), but the TgzArchive class only supports GNU Tar.
+ self.zipflag = '--use-compress-program=zstd'
+
+
+class ZipZArchive(ZipArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(ZipZArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipinfoflag = '-Z'
+ self.binaries = (
+ ('unzip', 'cmd_path'),
+ ('unzip', 'zipinfo_cmd_path'),
+ )
+
+ def can_handle_archive(self):
+ unzip_available, error_msg = super(ZipZArchive, self).can_handle_archive()
+
+ if not unzip_available:
+ return unzip_available, error_msg
+
+        # Ensure unzip -Z is available before we use it in is_unarchived
+ cmd = [self.zipinfo_cmd_path, self.zipinfoflag]
+ rc, out, err = self.module.run_command(cmd)
+ if 'zipinfo' in out.lower():
+ return True, None
+ return False, 'Command "unzip -Z" could not handle archive: %s' % err
+
+
+# try handlers in order and return the one that works or bail if none work
+def pick_handler(src, dest, file_args, module):
+ handlers = [ZipArchive, ZipZArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive, TarZstdArchive]
+ reasons = set()
+ for handler in handlers:
+ obj = handler(src, dest, file_args, module)
+ (can_handle, reason) = obj.can_handle_archive()
+ if can_handle:
+ return obj
+ reasons.add(reason)
+ reason_msg = '\n'.join(reasons)
+ module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.\n%s' % (src, reason_msg))
+
+
+def main():
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ dest=dict(type='path', required=True),
+ remote_src=dict(type='bool', default=False),
+ creates=dict(type='path'),
+ list_files=dict(type='bool', default=False),
+ keep_newer=dict(type='bool', default=False),
+ exclude=dict(type='list', elements='str', default=[]),
+ include=dict(type='list', elements='str', default=[]),
+ extra_opts=dict(type='list', elements='str', default=[]),
+ validate_certs=dict(type='bool', default=True),
+ io_buffer_size=dict(type='int', default=64 * 1024),
+
+ # Options that are for the action plugin, but ignored by the module itself.
+ # We have them here so that the sanity tests pass without ignores, which
+ # reduces the likelihood of further bugs added.
+ copy=dict(type='bool', default=True),
+ decrypt=dict(type='bool', default=True),
+ ),
+ add_file_common_args=True,
+ # check-mode only works for zip files, we cover that later
+ supports_check_mode=True,
+ mutually_exclusive=[('include', 'exclude')],
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ remote_src = module.params['remote_src']
+ file_args = module.load_file_common_arguments(module.params)
+
+ # did tar file arrive?
+ if not os.path.exists(src):
+ if not remote_src:
+ module.fail_json(msg="Source '%s' failed to transfer" % src)
+        # If remote_src=true and src contains ://, try to download the file to a temp directory.
+ elif '://' in src:
+ src = fetch_file(module, src)
+ else:
+ module.fail_json(msg="Source '%s' does not exist" % src)
+ if not os.access(src, os.R_OK):
+ module.fail_json(msg="Source '%s' not readable" % src)
+
+ # skip working with 0 size archives
+ try:
+ if os.path.getsize(src) == 0:
+ module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
+ except Exception as e:
+ module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
+
+ # is dest OK to receive tar file?
+ if not os.path.isdir(b_dest):
+ module.fail_json(msg="Destination '%s' is not a directory" % dest)
+
+ handler = pick_handler(src, b_dest, file_args, module)
+
+ res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
+
+    # do we need to unpack?
+ check_results = handler.is_unarchived()
+
+ # DEBUG
+ # res_args['check_results'] = check_results
+
+ if module.check_mode:
+ res_args['changed'] = not check_results['unarchived']
+ elif check_results['unarchived']:
+ res_args['changed'] = False
+ else:
+ # do the unpack
+ try:
+ res_args['extract_results'] = handler.unarchive()
+ if res_args['extract_results']['rc'] != 0:
+ module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ except IOError:
+ module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ else:
+ res_args['changed'] = True
+
+ # Get diff if required
+ if check_results.get('diff', False):
+ res_args['diff'] = {'prepared': check_results['diff']}
+
+ # Run only if we found differences (idempotence) or diff was missing
+ if res_args.get('diff', True) and not module.check_mode:
+ # do we need to change perms?
+ top_folders = []
+ for filename in handler.files_in_archive:
+ file_args['path'] = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
+
+ try:
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+
+ if '/' in filename:
+ top_folder_path = filename.split('/')[0]
+ if top_folder_path not in top_folders:
+ top_folders.append(top_folder_path)
+
+ # make sure top folders have the right permissions
+ # https://github.com/ansible/ansible/issues/35426
+ if top_folders:
+ for f in top_folders:
+ file_args['path'] = "%s/%s" % (dest, f)
+ try:
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+
+ if module.params['list_files']:
+ res_args['files'] = handler.files_in_archive
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/uri.py b/lib/ansible/modules/uri.py
new file mode 100644
index 0000000..f68b86a
--- /dev/null
+++ b/lib/ansible/modules/uri.py
@@ -0,0 +1,779 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: uri
+short_description: Interacts with webservices
+description:
+ - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
+ HTTP authentication mechanisms.
+ - For Windows targets, use the M(ansible.windows.win_uri) module instead.
+version_added: "1.1"
+options:
+ ciphers:
+ description:
+ - SSL/TLS Ciphers to use for the request.
+ - 'When a list is provided, all ciphers are joined in order with C(:)'
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+      - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions
+ type: list
+ elements: str
+ version_added: '2.14'
+ decompress:
+ description:
+ - Whether to attempt to decompress gzip content-encoded responses
+ type: bool
+ default: true
+ version_added: '2.14'
+ url:
+ description:
+ - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
+ type: str
+ required: true
+ dest:
+ description:
+      - A path to download the file to (if desired). If I(dest) is a
+ directory, the basename of the file on the remote server will be used.
+ type: path
+ url_username:
+ description:
+ - A username for the module to use for Digest, Basic or WSSE authentication.
+ type: str
+ aliases: [ user ]
+ url_password:
+ description:
+ - A password for the module to use for Digest, Basic or WSSE authentication.
+ type: str
+ aliases: [ password ]
+ body:
+ description:
+      - The body of the HTTP request/response to the web service. If C(body_format) is set
+ to 'json' it will take an already formatted JSON string or convert a data structure
+ into JSON.
+ - If C(body_format) is set to 'form-urlencoded' it will convert a dictionary
+ or list of tuples into an 'application/x-www-form-urlencoded' string. (Added in v2.7)
+ - If C(body_format) is set to 'form-multipart' it will convert a dictionary
+ into 'multipart/form-multipart' body. (Added in v2.10)
+ type: raw
+ body_format:
+ description:
+ - The serialization format of the body. When set to C(json), C(form-multipart), or C(form-urlencoded), encodes
+ the body argument, if needed, and automatically sets the Content-Type header accordingly.
+      - As of v2.3 it is possible to override the C(Content-Type) header when
+        set to C(json) or C(form-urlencoded), via the I(headers) option.
+      - The C(Content-Type) header cannot be overridden when using C(form-multipart).
+ - C(form-urlencoded) was added in v2.7.
+ - C(form-multipart) was added in v2.10.
+ type: str
+ choices: [ form-urlencoded, json, raw, form-multipart ]
+ default: raw
+ version_added: "2.0"
+ method:
+ description:
+ - The HTTP method of the request or response.
+      - In more recent versions the method is no longer restricted at the module level,
+        but it must still be a valid method accepted by the service handling the request.
+ type: str
+ default: GET
+ return_content:
+ description:
+      - Whether or not to return the body of the response as a "content" key in
+        the dictionary result, regardless of whether the request succeeded or failed.
+ - Independently of this option, if the reported Content-type is "application/json", then the JSON is
+ always loaded into a key called C(json) in the dictionary results.
+ type: bool
+ default: no
+ force_basic_auth:
+ description:
+ - Force the sending of the Basic authentication header upon initial request.
+ - The library used by the uri module only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail.
+ type: bool
+ default: no
+ follow_redirects:
+ description:
+ - Whether or not the URI module should follow redirects. C(all) will follow all redirects.
+ C(safe) will follow only "safe" redirects, where "safe" means that the client is only
+ doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
+ any redirects. Note that C(true) and C(false) choices are accepted for backwards compatibility,
+ where C(true) is the equivalent of C(all) and C(false) is the equivalent of C(safe). C(true) and C(false)
+ are deprecated and will be removed in some future version of Ansible.
+ type: str
+ choices: ['all', 'no', 'none', 'safe', 'urllib2', 'yes']
+ default: safe
+ creates:
+ description:
+      - A filename; when it already exists, this step will not be run.
+ type: path
+ removes:
+ description:
+      - A filename; when it does not exist, this step will not be run.
+ type: path
+ status_code:
+ description:
+      - A list of valid, numeric, HTTP status codes that signify success of the request.
+ type: list
+ elements: int
+ default: [ 200 ]
+ timeout:
+ description:
+ - The socket level timeout in seconds
+ type: int
+ default: 30
+ headers:
+ description:
+ - Add custom HTTP headers to a request in the format of a YAML hash. As
+ of C(2.3) supplying C(Content-Type) here will override the header
+ generated by supplying C(json) or C(form-urlencoded) for I(body_format).
+ type: dict
+ version_added: '2.1'
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+      - This should only be set to C(false) when used on personally controlled sites using self-signed certificates.
+ - Prior to 1.9.2 the code defaulted to C(false).
+ type: bool
+ default: true
+ version_added: '1.9.2'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+      - This file can also include the key; if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '2.4'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '2.4'
+ ca_path:
+ description:
+ - PEM formatted file that contains a CA certificate to be used for validation
+ type: path
+ version_added: '2.11'
+ src:
+ description:
+ - Path to file to be submitted to the remote server.
+ - Cannot be used with I(body).
+ - Should be used with I(force_basic_auth) to ensure success when the remote end sends a 401.
+ type: path
+ version_added: '2.7'
+ remote_src:
+ description:
+ - If C(false), the module will search for the C(src) on the controller node.
+ - If C(true), the module will search for the C(src) on the managed (remote) node.
+ type: bool
+ default: no
+ version_added: '2.7'
+ force:
+ description:
+ - If C(true) do not get a cached copy.
+ type: bool
+ default: no
+ use_proxy:
+ description:
+ - If C(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ type: bool
+ default: true
+ unix_socket:
+ description:
+ - Path to Unix domain socket to use for connection
+ type: path
+ version_added: '2.8'
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+ unredirected_headers:
+ description:
+ - A list of header names that will not be sent on subsequent redirected requests. This list is case
+        insensitive. By default all headers are sent on redirected requests. In some cases it may be beneficial to list
+ headers such as C(Authorization) here to avoid potential credential exposure.
+ default: []
+ type: list
+ elements: str
+ version_added: '2.12'
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+        C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
+ use_netrc:
+ description:
+      - Determines whether to use credentials from the ``~/.netrc`` file.
+      - By default, .netrc is used with Basic authentication headers.
+      - When set to C(false), .netrc credentials are ignored.
+ type: bool
+ default: true
+ version_added: '2.14'
+extends_documentation_fragment:
+ - action_common_attributes
+ - files
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - The dependency on httplib2 was removed in Ansible 2.1.
+ - The module returns all the HTTP headers in lower-case.
+ - For Windows targets, use the M(ansible.windows.win_uri) module instead.
+seealso:
+- module: ansible.builtin.get_url
+- module: ansible.windows.win_uri
+author:
+- Romeo Theriault (@romeotheriault)
+'''
+
+EXAMPLES = r'''
+- name: Check that you can connect (GET) to a page and it returns a status 200
+ ansible.builtin.uri:
+ url: http://www.example.com
+
+- name: Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents
+ ansible.builtin.uri:
+ url: http://www.example.com
+ return_content: true
+ register: this
+ failed_when: "'AWESOME' not in this.content"
+
+- name: Create a JIRA issue
+ ansible.builtin.uri:
+ url: https://your.jira.example.com/rest/api/2/issue/
+ user: your_username
+ password: your_pass
+ method: POST
+ body: "{{ lookup('ansible.builtin.file','issue.json') }}"
+ force_basic_auth: true
+ status_code: 201
+ body_format: json
+
+- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
+ ansible.builtin.uri:
+ url: https://your.form.based.auth.example.com/index.php
+ method: POST
+ body_format: form-urlencoded
+ body:
+ name: your_username
+ password: your_password
+ enter: Sign in
+ status_code: 302
+ register: login
+
+- name: Login to a form based webpage using a list of tuples
+ ansible.builtin.uri:
+ url: https://your.form.based.auth.example.com/index.php
+ method: POST
+ body_format: form-urlencoded
+ body:
+ - [ name, your_username ]
+ - [ password, your_password ]
+ - [ enter, Sign in ]
+ status_code: 302
+ register: login
+
+- name: Upload a file via multipart/form-multipart
+ ansible.builtin.uri:
+ url: https://httpbin.org/post
+ method: POST
+ body_format: form-multipart
+ body:
+ file1:
+ filename: /bin/true
+ mime_type: application/octet-stream
+ file2:
+ content: text based file content
+ filename: fake.txt
+ mime_type: text/plain
+ text_form_field: value
+
+- name: Connect to website using a previously stored cookie
+ ansible.builtin.uri:
+ url: https://your.form.based.auth.example.com/dashboard.php
+ method: GET
+ return_content: true
+ headers:
+ Cookie: "{{ login.cookies_string }}"
+
+- name: Queue build of a project in Jenkins
+ ansible.builtin.uri:
+ url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
+ user: "{{ jenkins.user }}"
+ password: "{{ jenkins.password }}"
+ method: GET
+ force_basic_auth: true
+ status_code: 201
+
+- name: POST from contents of local file
+ ansible.builtin.uri:
+ url: https://httpbin.org/post
+ method: POST
+ src: file.json
+
+- name: POST from contents of remote file
+ ansible.builtin.uri:
+ url: https://httpbin.org/post
+ method: POST
+ src: /path/to/my/file.json
+ remote_src: true
+
+- name: Create workspaces in Log analytics Azure
+ ansible.builtin.uri:
+ url: https://www.mms.microsoft.com/Embedded/Api/ConfigDataSources/LogManagementData/Save
+ method: POST
+ body_format: json
+ status_code: [200, 202]
+ return_content: true
+ headers:
+ Content-Type: application/json
+ x-ms-client-workspace-path: /subscriptions/{{ sub_id }}/resourcegroups/{{ res_group }}/providers/microsoft.operationalinsights/workspaces/{{ w_spaces }}
+ x-ms-client-platform: ibiza
+ x-ms-client-auth-token: "{{ token_az }}"
+ body:
+
+- name: Pause play until a URL is reachable from this host
+ ansible.builtin.uri:
+ url: "http://192.0.2.1/some/test"
+ follow_redirects: none
+ method: GET
+ register: _result
+ until: _result.status == 200
+    retries: 720 # 720 * 5 seconds = 1 hour (60*60/5)
+ delay: 5 # Every 5 seconds
+
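+# Hedged example of the documented I(creates) option; the URL and paths are
+# placeholders. The task is skipped once the destination file already exists.
+- name: Download a file only if it has not been downloaded before
+  ansible.builtin.uri:
+    url: https://example.org/artifact.tar.gz
+    dest: /tmp/artifact.tar.gz
+    creates: /tmp/artifact.tar.gz
+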
+- name: Provide SSL/TLS ciphers as a list
+ uri:
+ url: https://example.org
+ ciphers:
+ - '@SECLEVEL=2'
+ - ECDH+AESGCM
+ - ECDH+CHACHA20
+ - ECDH+AES
+ - DHE+AES
+ - '!aNULL'
+ - '!eNULL'
+ - '!aDSS'
+ - '!SHA1'
+ - '!AESCCM'
+
+- name: Provide SSL/TLS ciphers as an OpenSSL formatted cipher list
+ uri:
+ url: https://example.org
+ ciphers: '@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM'
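+
+# Hedged example of I(unredirected_headers); the URL and the api_token variable
+# are placeholders. Listing Authorization avoids forwarding credentials if the
+# server redirects the request elsewhere.
+- name: Fetch a protected resource without forwarding credentials on redirect
+  ansible.builtin.uri:
+    url: https://example.org/protected
+    headers:
+      Authorization: "Bearer {{ api_token }}"
+    unredirected_headers:
+      - Authorization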
+'''
+
+RETURN = r'''
+# The return information includes all the HTTP headers in lower-case.
+content:
+ description: The response body content.
+ returned: status not in status_code or return_content is true
+ type: str
+ sample: "{}"
+cookies:
+ description: The cookie values placed in cookie jar.
+ returned: on success
+ type: dict
+ sample: {"SESSIONID": "[SESSIONID]"}
+ version_added: "2.4"
+cookies_string:
+ description: The value for future request Cookie headers.
+ returned: on success
+ type: str
+ sample: "SESSIONID=[SESSIONID]"
+ version_added: "2.6"
+elapsed:
+ description: The number of seconds that elapsed while performing the download.
+ returned: on success
+ type: int
+ sample: 23
+msg:
+ description: The HTTP message from the request.
+ returned: always
+ type: str
+ sample: OK (unknown bytes)
+path:
+ description: destination file/path
+ returned: dest is defined
+ type: str
+ sample: /path/to/file.txt
+redirected:
+ description: Whether the request was redirected.
+ returned: on success
+ type: bool
+ sample: false
+status:
+ description: The HTTP status code from the request.
+ returned: always
+ type: int
+ sample: 200
+url:
+ description: The actual URL used for the request.
+ returned: always
+ type: str
+ sample: https://www.ansible.com/
+'''
+
+import datetime
+import json
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule, sanitize_keys
+from ansible.module_utils.six import PY2, PY3, binary_type, iteritems, string_types
+from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.urls import fetch_url, get_response_filename, parse_content_type, prepare_multipart, url_argument_spec
+
+JSON_CANDIDATES = {'json', 'javascript'}
+
+# List of response key names we do not want sanitize_keys() to change.
+NO_MODIFY_KEYS = frozenset(
+ ('msg', 'exception', 'warnings', 'deprecations', 'failed', 'skipped',
+ 'changed', 'rc', 'stdout', 'stderr', 'elapsed', 'path', 'location',
+ 'content_type')
+)
+
+
+def format_message(err, resp):
+ msg = resp.pop('msg')
+ return err + (' %s' % msg if msg else '')
+
+
+def write_file(module, dest, content, resp):
+ """
+ Create temp file and write content to dest file only if content changed
+ """
+
+ tmpsrc = None
+
+ try:
+ fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
+ with os.fdopen(fd, 'wb') as f:
+ if isinstance(content, binary_type):
+ f.write(content)
+ else:
+ shutil.copyfileobj(content, f)
+ except Exception as e:
+ if tmpsrc and os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
+ module.fail_json(msg=msg, **resp)
+
+ checksum_src = module.sha1(tmpsrc)
+ checksum_dest = module.sha1(dest)
+
+ if checksum_src != checksum_dest:
+ try:
+ module.atomic_move(tmpsrc, dest)
+ except Exception as e:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
+ module.fail_json(msg=msg, **resp)
+
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+
+
+def absolute_location(url, location):
+ """Attempts to create an absolute URL based on initial URL, and
+ next URL, specifically in the case of a ``Location`` header.
+ """
+
+ if '://' in location:
+ return location
+
+ elif location.startswith('/'):
+ parts = urlsplit(url)
+ base = url.replace(parts[2], '')
+ return '%s%s' % (base, location)
+
+ elif not location.startswith('/'):
+ base = os.path.dirname(url)
+ return '%s/%s' % (base, location)
+
+ else:
+ return location
+
+
+def kv_list(data):
+ ''' Convert data into a list of key-value tuples '''
+ if data is None:
+ return None
+
+ if isinstance(data, Sequence):
+ return list(data)
+
+ if isinstance(data, Mapping):
+ return list(data.items())
+
+ raise TypeError('cannot form-urlencode body, expect list or dict')
+
+
+def form_urlencoded(body):
+ ''' Convert data into a form-urlencoded string '''
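+    # For instance (illustrative inputs, not from the module docs):
+    #   form_urlencoded({'name': 'jo', 'tags': ['a', 'b']}) -> 'name=jo&tags=a&tags=b'
+    # A list of (key, value) pairs is accepted as well.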
+ if isinstance(body, string_types):
+ return body
+
+ if isinstance(body, (Mapping, Sequence)):
+ result = []
+ # Turn a list of lists into a list of tuples that urlencode accepts
+ for key, values in kv_list(body):
+ if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
+ values = [values]
+ for value in values:
+ if value is not None:
+ result.append((to_text(key), to_text(value)))
+ return urlencode(result, doseq=True)
+
+ return body
+
+
+def uri(module, url, dest, body, body_format, method, headers, socket_timeout, ca_path, unredirected_headers, decompress,
+ ciphers, use_netrc):
+    # if dest is set and is a directory, check whether we get redirected and
+    # set the filename from that url
+
+ src = module.params['src']
+ if src:
+ try:
+ headers.update({
+ 'Content-Length': os.stat(src).st_size
+ })
+ data = open(src, 'rb')
+ except OSError:
+ module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
+ else:
+ data = body
+
+ kwargs = {}
+ if dest is not None and os.path.isfile(dest):
+        # if the destination file already exists, only download if the file is newer
+ kwargs['last_mod_time'] = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
+
+ resp, info = fetch_url(module, url, data=data, headers=headers,
+ method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
+ ca_path=ca_path, unredirected_headers=unredirected_headers,
+ use_proxy=module.params['use_proxy'], decompress=decompress,
+ ciphers=ciphers, use_netrc=use_netrc, **kwargs)
+
+ if src:
+ # Try to close the open file handle
+ try:
+ data.close()
+ except Exception:
+ pass
+
+ return resp, info
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dest=dict(type='path'),
+ url_username=dict(type='str', aliases=['user']),
+ url_password=dict(type='str', aliases=['password'], no_log=True),
+ body=dict(type='raw'),
+ body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw', 'form-multipart']),
+ src=dict(type='path'),
+ method=dict(type='str', default='GET'),
+ return_content=dict(type='bool', default=False),
+ follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ status_code=dict(type='list', elements='int', default=[200]),
+ timeout=dict(type='int', default=30),
+ headers=dict(type='dict', default={}),
+ unix_socket=dict(type='path'),
+ remote_src=dict(type='bool', default=False),
+ ca_path=dict(type='path', default=None),
+ unredirected_headers=dict(type='list', elements='str', default=[]),
+ decompress=dict(type='bool', default=True),
+ ciphers=dict(type='list', elements='str'),
+ use_netrc=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ mutually_exclusive=[['body', 'src']],
+ )
+
+ url = module.params['url']
+ body = module.params['body']
+ body_format = module.params['body_format'].lower()
+ method = module.params['method'].upper()
+ dest = module.params['dest']
+ return_content = module.params['return_content']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ status_code = [int(x) for x in list(module.params['status_code'])]
+ socket_timeout = module.params['timeout']
+ ca_path = module.params['ca_path']
+ dict_headers = module.params['headers']
+ unredirected_headers = module.params['unredirected_headers']
+ decompress = module.params['decompress']
+ ciphers = module.params['ciphers']
+ use_netrc = module.params['use_netrc']
+
+ if not re.match('^[A-Z]+$', method):
+ module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")
+
+ if body_format == 'json':
+        # Encode the body unless it's a string, in which case assume it is pre-formatted JSON
+ if not isinstance(body, string_types):
+ body = json.dumps(body)
+ if 'content-type' not in [header.lower() for header in dict_headers]:
+ dict_headers['Content-Type'] = 'application/json'
+ elif body_format == 'form-urlencoded':
+ if not isinstance(body, string_types):
+ try:
+ body = form_urlencoded(body)
+ except ValueError as e:
+ module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
+ if 'content-type' not in [header.lower() for header in dict_headers]:
+ dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
+ elif body_format == 'form-multipart':
+ try:
+ content_type, body = prepare_multipart(body)
+ except (TypeError, ValueError) as e:
+ module.fail_json(msg='failed to parse body as form-multipart: %s' % to_native(e))
+ dict_headers['Content-Type'] = content_type
+
+ if creates is not None:
+        # do not run the request if 'creates' points to a file
+        # that already exists. This allows idempotence
+        # of uri executions.
+ if os.path.exists(creates):
+ module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)
+
+ if removes is not None:
+        # do not run the request if 'removes' points to a file
+        # that does not exist. This allows idempotence
+        # of uri executions.
+ if not os.path.exists(removes):
+ module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
+
+ # Make the request
+ start = datetime.datetime.utcnow()
+ r, info = uri(module, url, dest, body, body_format, method,
+ dict_headers, socket_timeout, ca_path, unredirected_headers,
+ decompress, ciphers, use_netrc)
+
+ elapsed = (datetime.datetime.utcnow() - start).seconds
+
+ if r and dest is not None and os.path.isdir(dest):
+ filename = get_response_filename(r) or 'index.html'
+ dest = os.path.join(dest, filename)
+
+ if r and r.fp is not None:
+ # r may be None for some errors
+ # r.fp may be None depending on the error, which means there are no headers either
+ content_type, main_type, sub_type, content_encoding = parse_content_type(r)
+ else:
+ content_type = 'application/octet-stream'
+ main_type = 'application'
+ sub_type = 'octet-stream'
+ content_encoding = 'utf-8'
+
+ maybe_json = content_type and sub_type.lower() in JSON_CANDIDATES
+ maybe_output = maybe_json or return_content or info['status'] not in status_code
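+    # Read the body whenever it might be JSON, the caller asked for content, or
+    # the status is unexpected (so the failure message can include the body).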
+
+ if maybe_output:
+ try:
+ if PY3 and (r.fp is None or r.closed):
+ raise TypeError
+ content = r.read()
+ except (AttributeError, TypeError):
+        # there was no content to read; the error response body
+        # may have been stored by fetch_url in info as 'body'
+ content = info.pop('body', b'')
+ elif r:
+ content = r
+ else:
+ content = None
+
+ resp = {}
+ resp['redirected'] = info['url'] != url
+ resp.update(info)
+
+ resp['elapsed'] = elapsed
+ resp['status'] = int(resp['status'])
+ resp['changed'] = False
+
+ # Write the file out if requested
+ if r and dest is not None:
+ if resp['status'] in status_code and resp['status'] != 304:
+ write_file(module, dest, content, resp)
+ # allow file attribute changes
+ resp['changed'] = True
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
+ resp['path'] = dest
+
+ # Transmogrify the headers, replacing '-' with '_', since variables don't
+ # work with dashes.
+ # In python3, the headers are title cased. Lowercase them to be
+ # compatible with the python2 behaviour.
+ uresp = {}
+ for key, value in iteritems(resp):
+ ukey = key.replace("-", "_").lower()
+ uresp[ukey] = value
+
+ if 'location' in uresp:
+ uresp['location'] = absolute_location(url, uresp['location'])
+
+ # Default content_encoding to try
+ if isinstance(content, binary_type):
+ u_content = to_text(content, encoding=content_encoding)
+ if maybe_json:
+ try:
+ js = json.loads(u_content)
+ uresp['json'] = js
+ except Exception:
+ if PY2:
+ sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2
+ else:
+ u_content = None
+
+ if module.no_log_values:
+ uresp = sanitize_keys(uresp, module.no_log_values, NO_MODIFY_KEYS)
+
+ if resp['status'] not in status_code:
+ uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
+ if return_content:
+ module.fail_json(content=u_content, **uresp)
+ else:
+ module.fail_json(**uresp)
+ elif return_content:
+ module.exit_json(content=u_content, **uresp)
+ else:
+ module.exit_json(**uresp)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/user.py b/lib/ansible/modules/user.py
new file mode 100644
index 0000000..2fc4e47
--- /dev/null
+++ b/lib/ansible/modules/user.py
@@ -0,0 +1,3253 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: user
+version_added: "0.2"
+short_description: Manage user accounts
+description:
+ - Manage user accounts and user attributes.
+ - For Windows targets, use the M(ansible.windows.win_user) module instead.
+options:
+ name:
+ description:
+ - Name of the user to create, remove or modify.
+ type: str
+ required: true
+ aliases: [ user ]
+ uid:
+ description:
+ - Optionally sets the I(UID) of the user.
+ type: int
+ comment:
+ description:
+ - Optionally sets the description (aka I(GECOS)) of user account.
+ type: str
+ hidden:
+ description:
+ - macOS only, optionally hide the user from the login window and system preferences.
+ - The default will be C(true) if the I(system) option is used.
+ type: bool
+ version_added: "2.6"
+ non_unique:
+ description:
+      - Optionally, when used with the I(uid) option, this allows the user ID to be changed to a non-unique value.
+ type: bool
+ default: no
+ version_added: "1.1"
+ seuser:
+ description:
+ - Optionally sets the seuser type (user_u) on selinux enabled systems.
+ type: str
+ version_added: "2.1"
+ group:
+ description:
+ - Optionally sets the user's primary group (takes a group name).
+ type: str
+ groups:
+ description:
+ - List of groups user will be added to.
+ - By default, the user is removed from all other groups. Configure C(append) to modify this.
+ - When set to an empty string C(''),
+ the user is removed from all groups except the primary group.
+ - Before Ansible 2.3, the only input format allowed was a comma separated string.
+ type: list
+ elements: str
+ append:
+ description:
+ - If C(true), add the user to the groups specified in C(groups).
+ - If C(false), user will only be added to the groups specified in C(groups),
+ removing them from all other groups.
+ type: bool
+ default: no
+ shell:
+ description:
+ - Optionally set the user's shell.
+ - On macOS, before Ansible 2.5, the default shell for non-system users was C(/usr/bin/false).
+ Since Ansible 2.5, the default shell for non-system users on macOS is C(/bin/bash).
+      - See the notes for details on how the default shell is determined by the
+        underlying tool on other operating systems.
+ type: str
+ home:
+ description:
+ - Optionally set the user's home directory.
+ type: path
+ skeleton:
+ description:
+ - Optionally set a home skeleton directory.
+      - Requires the C(create_home) option.
+ type: str
+ version_added: "2.0"
+ password:
+ description:
+ - If provided, set the user's password to the provided encrypted hash (Linux) or plain text password (macOS).
+ - B(Linux/Unix/POSIX:) Enter the hashed password as the value.
+ - See L(FAQ entry,https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
+ for details on various ways to generate the hash of a password.
+ - To create an account with a locked/disabled password on Linux systems, set this to C('!') or C('*').
+ - To create an account with a locked/disabled password on OpenBSD, set this to C('*************').
+ - B(OS X/macOS:) Enter the cleartext password as the value. Be sure to take relevant security precautions.
+ type: str
+ state:
+ description:
+ - Whether the account should exist or not, taking action if the state is different from what is stated.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ create_home:
+ description:
+ - Unless set to C(false), a home directory will be made for the user
+ when the account is created or if the home directory does not exist.
+ - Changed from C(createhome) to C(create_home) in Ansible 2.5.
+ type: bool
+ default: yes
+ aliases: [ createhome ]
+ move_home:
+ description:
+ - "If set to C(true) when used with C(home: ), attempt to move the user's old home
+ directory to the specified directory if it isn't there already and the old home exists."
+ type: bool
+ default: no
+ system:
+ description:
+ - When creating an account C(state=present), setting this to C(true) makes the user a system account.
+ - This setting cannot be changed on existing users.
+ type: bool
+ default: no
+ force:
+ description:
+ - This only affects C(state=absent), it forces removal of the user and associated directories on supported platforms.
+ - The behavior is the same as C(userdel --force), check the man page for C(userdel) on your system for details and support.
+ - When used with C(generate_ssh_key=yes) this forces an existing key to be overwritten.
+ type: bool
+ default: no
+ remove:
+ description:
+ - This only affects C(state=absent), it attempts to remove directories associated with the user.
+ - The behavior is the same as C(userdel --remove), check the man page for details and support.
+ type: bool
+ default: no
+ login_class:
+ description:
+ - Optionally sets the user's login class, a feature of most BSD OSs.
+ type: str
+ generate_ssh_key:
+ description:
+ - Whether to generate a SSH key for the user in question.
+ - This will B(not) overwrite an existing SSH key unless used with C(force=yes).
+ type: bool
+ default: no
+ version_added: "0.9"
+ ssh_key_bits:
+ description:
+ - Optionally specify number of bits in SSH key to create.
+ - The default value depends on ssh-keygen.
+ type: int
+ version_added: "0.9"
+ ssh_key_type:
+ description:
+ - Optionally specify the type of SSH key to generate.
+      - Available SSH key types depend on the implementation
+        present on the target host.
+ type: str
+ default: rsa
+ version_added: "0.9"
+ ssh_key_file:
+ description:
+ - Optionally specify the SSH key filename.
+ - If this is a relative filename then it will be relative to the user's home directory.
+ - This parameter defaults to I(.ssh/id_rsa).
+ type: path
+ version_added: "0.9"
+ ssh_key_comment:
+ description:
+ - Optionally define the comment for the SSH key.
+ type: str
+ default: ansible-generated on $HOSTNAME
+ version_added: "0.9"
+ ssh_key_passphrase:
+ description:
+ - Set a passphrase for the SSH key.
+ - If no passphrase is provided, the SSH key will default to having no passphrase.
+ type: str
+ version_added: "0.9"
+ update_password:
+ description:
+ - C(always) will update passwords if they differ.
+ - C(on_create) will only set the password for newly created users.
+ type: str
+ choices: [ always, on_create ]
+ default: always
+ version_added: "1.3"
+ expires:
+ description:
+      - An expiry time for the user in epoch seconds; it will be ignored on platforms that do not support this.
+ - Currently supported on GNU/Linux, FreeBSD, and DragonFlyBSD.
+ - Since Ansible 2.6 you can remove the expiry time by specifying a negative value.
+ Currently supported on GNU/Linux and FreeBSD.
+ type: float
+ version_added: "1.9"
+ password_lock:
+ description:
+ - Lock the password (C(usermod -L), C(usermod -U), C(pw lock)).
+      - Implementation differs by platform. This option does not always mean the user cannot log in using other methods.
+      - This option does not disable the user; it only locks the password.
+      - This must be set to C(false) in order to unlock a currently locked password. The absence of this parameter will not unlock a password.
+ - Currently supported on Linux, FreeBSD, DragonFlyBSD, NetBSD, OpenBSD.
+ type: bool
+ version_added: "2.6"
+ local:
+ description:
+ - Forces the use of "local" command alternatives on platforms that implement it.
+ - This is useful in environments that use centralized authentication when you want to manipulate the local users
+ (in other words, it uses C(luseradd) instead of C(useradd)).
+ - This will check C(/etc/passwd) for an existing account before invoking commands. If the local account database
+ exists somewhere other than C(/etc/passwd), this setting will not work properly.
+ - This requires that the above commands as well as C(/etc/passwd) must exist on the target host, otherwise it will be a fatal error.
+ type: bool
+ default: no
+ version_added: "2.4"
+ profile:
+ description:
+ - Sets the profile of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple profiles using comma separation.
+ - To delete all the profiles, use C(profile='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ authorization:
+ description:
+ - Sets the authorization of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple authorizations using comma separation.
+ - To delete all authorizations, use C(authorization='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ role:
+ description:
+ - Sets the role of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple roles using comma separation.
+ - To delete all roles, use C(role='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ password_expire_max:
+ description:
+ - Maximum number of days between password change.
+ - Supported on Linux only.
+ type: int
+ version_added: "2.11"
+ password_expire_min:
+ description:
+ - Minimum number of days between password change.
+ - Supported on Linux only.
+ type: int
+ version_added: "2.11"
+ umask:
+ description:
+ - Sets the umask of the user.
+ - Does nothing when used with other platforms.
+ - Currently supported on Linux.
+      - Requires C(local) to be omitted or C(false).
+ type: str
+ version_added: "2.12"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+  - There are specific requirements per platform on user management utilities. However,
+    they generally come pre-installed with the system, and Ansible will require them to
+    be present at runtime. If they are not, a descriptive error message will be shown.
+ - On SunOS platforms, the shadow file is backed up automatically since this module edits it directly.
+ On other platforms, the shadow file is backed up by the underlying tools used by this module.
+ - On macOS, this module uses C(dscl) to create, modify, and delete accounts. C(dseditgroup) is used to
+ modify group membership. Accounts are hidden from the login window by modifying
+ C(/Library/Preferences/com.apple.loginwindow.plist).
+ - On FreeBSD, this module uses C(pw useradd) and C(chpass) to create, C(pw usermod) and C(chpass) to modify,
+    C(pw userdel) to remove, C(pw lock) to lock, and C(pw unlock) to unlock accounts.
+ - On all other platforms, this module uses C(useradd) to create, C(usermod) to modify, and
+ C(userdel) to remove accounts.
+seealso:
+- module: ansible.posix.authorized_key
+- module: ansible.builtin.group
+- module: ansible.windows.win_user
+author:
+- Stephen Fromm (@sfromm)
+'''
+
+EXAMPLES = r'''
+- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
+ ansible.builtin.user:
+ name: johnd
+ comment: John Doe
+ uid: 1040
+ group: admin
+
+- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
+ ansible.builtin.user:
+ name: james
+ shell: /bin/bash
+ groups: admins,developers
+ append: yes
+
+- name: Remove the user 'johnd'
+ ansible.builtin.user:
+ name: johnd
+ state: absent
+ remove: yes
+
+- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
+ ansible.builtin.user:
+ name: jsmith
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_file: .ssh/id_rsa
+
+- name: Add a consultant whose account you want to expire
+ ansible.builtin.user:
+ name: james18
+ shell: /bin/zsh
+ groups: developers
+ expires: 1422403387
+
+- name: Modify the user to remove the expiry time (supported since Ansible 2.6)
+ ansible.builtin.user:
+ name: james18
+ expires: -1
+
+- name: Set maximum expiration date for password
+ ansible.builtin.user:
+ name: ram19
+ password_expire_max: 10
+
+- name: Set minimum expiration date for password
+ ansible.builtin.user:
+ name: pushkar15
+ password_expire_min: 5
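+
+# Illustrative sketch only: a hashed password can be generated inline with the
+# ansible.builtin.password_hash filter (see the FAQ entry linked under the
+# password option). 'Secret123' is a placeholder; never hardcode real secrets.
+- name: Create a user with a hashed password
+  ansible.builtin.user:
+    name: asmith
+    password: "{{ 'Secret123' | password_hash('sha512') }}"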
+'''
+
+RETURN = r'''
+append:
+ description: Whether or not to append the user to groups.
+ returned: When state is C(present) and the user exists
+ type: bool
+ sample: True
+comment:
+ description: Comment section from passwd file, usually the user name.
+ returned: When user exists
+ type: str
+ sample: Agent Smith
+create_home:
+ description: Whether or not to create the home directory.
+ returned: When user does not exist and not check mode
+ type: bool
+ sample: True
+force:
+ description: Whether or not a user account was forcibly deleted.
+ returned: When I(state) is C(absent) and user exists
+ type: bool
+ sample: False
+group:
+ description: Primary user group ID
+ returned: When user exists
+ type: int
+ sample: 1001
+groups:
+ description: List of groups of which the user is a member.
+ returned: When I(groups) is not empty and I(state) is C(present)
+ type: str
+ sample: 'chrony,apache'
+home:
+ description: "Path to user's home directory."
+ returned: When I(state) is C(present)
+ type: str
+ sample: '/home/asmith'
+move_home:
+ description: Whether or not to move an existing home directory.
+ returned: When I(state) is C(present) and user exists
+ type: bool
+ sample: False
+name:
+ description: User account name.
+ returned: always
+ type: str
+ sample: asmith
+password:
+ description: Masked value of the password.
+ returned: When I(state) is C(present) and I(password) is not empty
+ type: str
+ sample: 'NOT_LOGGING_PASSWORD'
+remove:
+ description: Whether or not to remove the user account.
+ returned: When I(state) is C(absent) and user exists
+ type: bool
+ sample: True
+shell:
+ description: User login shell.
+ returned: When I(state) is C(present)
+ type: str
+ sample: '/bin/bash'
+ssh_fingerprint:
+ description: Fingerprint of generated SSH key.
+ returned: When I(generate_ssh_key) is C(True)
+ type: str
+ sample: '2048 SHA256:aYNHYcyVm87Igh0IMEDMbvW0QDlRQfE0aJugp684ko8 ansible-generated on host (RSA)'
+ssh_key_file:
+ description: Path to generated SSH private key file.
+ returned: When I(generate_ssh_key) is C(True)
+ type: str
+ sample: /home/asmith/.ssh/id_rsa
+ssh_public_key:
+ description: Generated SSH public key file.
+ returned: When I(generate_ssh_key) is C(True)
+ type: str
+ sample: >
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95opt4SPEC06tOYsJQJIuN23BbLMGmYo8ysVZQc4h2DZE9ugbjWWGS1/pweUGjVstgzMkBEeBCByaEf/RJKNecKRPeGd2Bw9DCj/bn5Z6rGfNENKBmo
+ 618mUJBvdlEgea96QGjOwSB7/gmonduC7gsWDMNcOdSE3wJMTim4lddiBx4RgC9yXsJ6Tkz9BHD73MXPpT5ETnse+A3fw3IGVSjaueVnlUyUmOBf7fzmZbhlFVXf2Zi2rFTXqvbdGHKkzpw1U8eB8xFPP7y
+ d5u1u0e6Acju/8aZ/l17IDFiLke5IzlqIMRTEbDwLNeO84YQKWTm9fODHzhYe0yvxqLiK07 ansible-generated on host'
+stderr:
+ description: Standard error from running commands.
+ returned: When stderr is returned by a command that is run
+ type: str
+ sample: Group wheels does not exist
+stdout:
+ description: Standard output from running commands.
+ returned: When standard output is returned by the command that is run
+ type: str
+ sample:
+system:
+ description: Whether or not the account is a system account.
+ returned: When I(system) is passed to the module and the account does not exist
+ type: bool
+ sample: True
+uid:
+ description: User ID of the user account.
+ returned: When I(uid) is passed to the module
+ type: int
+ sample: 1044
+password_expire_max:
+ description: Maximum number of days during which a password is valid.
+ returned: When user exists
+ type: int
+ sample: 20
+password_expire_min:
+  description: Minimum number of days between password changes.
+ returned: When user exists
+ type: int
+ sample: 20
+'''
+
+
+import ctypes
+import ctypes.util
+import errno
+import grp
+import calendar
+import os
+import re
+import pty
+import pwd
+import select
+import shutil
+import socket
+import subprocess
+import time
+import math
+
+from ansible.module_utils import distro
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.sys_info import get_platform_subclass
+import ansible.module_utils.compat.typing as t
+
+
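+# ctypes mirror of C's "struct spwd" from <shadow.h>; the field order and types
+# must match the C definition so the pointer returned by getspnam() below can
+# be dereferenced safely.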
+class StructSpwdType(ctypes.Structure):
+ _fields_ = [
+ ('sp_namp', ctypes.c_char_p),
+ ('sp_pwdp', ctypes.c_char_p),
+ ('sp_lstchg', ctypes.c_long),
+ ('sp_min', ctypes.c_long),
+ ('sp_max', ctypes.c_long),
+ ('sp_warn', ctypes.c_long),
+ ('sp_inact', ctypes.c_long),
+ ('sp_expire', ctypes.c_long),
+ ('sp_flag', ctypes.c_ulong),
+ ]
+
+
+try:
+ _LIBC = ctypes.cdll.LoadLibrary(
+ t.cast(
+ str,
+ ctypes.util.find_library('c')
+ )
+ )
+ _LIBC.getspnam.argtypes = (ctypes.c_char_p,)
+ _LIBC.getspnam.restype = ctypes.POINTER(StructSpwdType)
+ HAVE_SPWD = True
+except AttributeError:
+ HAVE_SPWD = False
+
+
+_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
+
+
+def getspnam(b_name):
+ return _LIBC.getspnam(b_name).contents
+
+
+class User(object):
+ """
+ This is a generic User manipulation class that is subclassed
+ based on platform.
+
+    A subclass may wish to override the following action methods:
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - ssh_key_gen()
+ - ssh_key_fingerprint()
+ - user_exists()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+ platform = 'Generic'
+ distribution = None # type: str | None
+ PASSWORDFILE = '/etc/passwd'
+ SHADOWFILE = '/etc/shadow' # type: str | None
+ SHADOWFILE_EXPIRE_INDEX = 7
+ LOGIN_DEFS = '/etc/login.defs'
+ DATE_FORMAT = '%Y-%m-%d'
+
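+    # Instantiating User() transparently returns the platform-specific subclass
+    # (for example, a FreeBSD variant) selected by get_platform_subclass().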
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(User)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.uid = module.params['uid']
+ self.hidden = module.params['hidden']
+ self.non_unique = module.params['non_unique']
+ self.seuser = module.params['seuser']
+ self.group = module.params['group']
+ self.comment = module.params['comment']
+ self.shell = module.params['shell']
+ self.password = module.params['password']
+ self.force = module.params['force']
+ self.remove = module.params['remove']
+ self.create_home = module.params['create_home']
+ self.move_home = module.params['move_home']
+ self.skeleton = module.params['skeleton']
+ self.system = module.params['system']
+ self.login_class = module.params['login_class']
+ self.append = module.params['append']
+ self.sshkeygen = module.params['generate_ssh_key']
+ self.ssh_bits = module.params['ssh_key_bits']
+ self.ssh_type = module.params['ssh_key_type']
+ self.ssh_comment = module.params['ssh_key_comment']
+ self.ssh_passphrase = module.params['ssh_key_passphrase']
+ self.update_password = module.params['update_password']
+ self.home = module.params['home']
+ self.expires = None
+ self.password_lock = module.params['password_lock']
+ self.groups = None
+ self.local = module.params['local']
+ self.profile = module.params['profile']
+ self.authorization = module.params['authorization']
+ self.role = module.params['role']
+ self.password_expire_max = module.params['password_expire_max']
+ self.password_expire_min = module.params['password_expire_min']
+ self.umask = module.params['umask']
+
+ if self.umask is not None and self.local:
+ module.fail_json(msg="'umask' can not be used with 'local'")
+
+ if module.params['groups'] is not None:
+ self.groups = ','.join(module.params['groups'])
+
+ if module.params['expires'] is not None:
+ try:
+ self.expires = time.gmtime(module.params['expires'])
+ except Exception as e:
+ module.fail_json(msg="Invalid value for 'expires' %s: %s" % (self.expires, to_native(e)))
+
+ if module.params['ssh_key_file'] is not None:
+ self.ssh_file = module.params['ssh_key_file']
+ else:
+ self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
+
+ if self.groups is None and self.append:
+ # Change the argument_spec in 2.14 and remove this warning
+ # required_by={'append': ['groups']}
+ module.warn("'append' is set, but no 'groups' are specified. Use 'groups' for appending new groups."
+ "This will change to an error in Ansible 2.14.")
+
+ def check_password_encrypted(self):
+ # Darwin needs cleartext password, so skip validation
+ if self.module.params['password'] and self.platform != 'Darwin':
+ maybe_invalid = False
+
+ # Allow setting certain passwords in order to disable the account
+ if self.module.params['password'] in set(['*', '!', '*************']):
+ maybe_invalid = False
+ else:
+ # : for delimiter, * for disable user, ! for lock user
+ # these characters are invalid in the password
+ if any(char in self.module.params['password'] for char in ':*!'):
+ maybe_invalid = True
+ if '$' not in self.module.params['password']:
+ maybe_invalid = True
+ else:
+ fields = self.module.params['password'].split("$")
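+                    # e.g. '$6$salt$hash' splits into ['', '6', 'salt', 'hash']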
+ if len(fields) >= 3:
+ # contains character outside the crypto constraint
+ if bool(_HASH_RE.search(fields[-1])):
+ maybe_invalid = True
+ # md5
+ if fields[1] == '1' and len(fields[-1]) != 22:
+ maybe_invalid = True
+ # sha256
+ if fields[1] == '5' and len(fields[-1]) != 43:
+ maybe_invalid = True
+ # sha512
+ if fields[1] == '6' and len(fields[-1]) != 86:
+ maybe_invalid = True
+ else:
+ maybe_invalid = True
+ if maybe_invalid:
+ self.module.warn("The input password appears not to have been hashed. "
+ "The 'password' argument must be encrypted for this module to work properly.")
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
+ if self.module.check_mode and obey_checkmode:
+ self.module.debug('In check mode, would have run: "%s"' % cmd)
+ return (0, '', '')
+ else:
+ # cast all args to strings ansible-modules-core/issues/4397
+ cmd = [str(x) for x in cmd]
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def backup_shadow(self):
+ if not self.module.check_mode and self.SHADOWFILE:
+ return self.module.backup_local(self.SHADOWFILE)
+
+ def remove_user_userdel(self):
+ if self.local:
+ command_name = 'luserdel'
+ else:
+ command_name = 'userdel'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+ if self.force and not self.local:
+ cmd.append('-f')
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user_useradd(self):
+
+ if self.local:
+ command_name = 'luseradd'
+ lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
+ lchage_cmd = self.module.get_bin_path('lchage', True)
+ else:
+ command_name = 'useradd'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.seuser is not None:
+ cmd.append('-Z')
+ cmd.append(self.seuser)
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+ elif self.group_exists(self.name):
+ # use the -N option (no user group) if a group already
+ # exists with the same name as the user to prevent
+ # errors from useradd trying to create a group when
+ # USERGROUPS_ENAB is set in /etc/login.defs.
+ if self.local:
+ # luseradd uses -n instead of -N
+ cmd.append('-n')
+ else:
+ if os.path.exists('/etc/redhat-release'):
+ dist = distro.version()
+ major_release = int(dist.split('.')[0])
+ if major_release <= 5:
+ cmd.append('-n')
+ else:
+ cmd.append('-N')
+ elif os.path.exists('/etc/SuSE-release'):
+ # -N did not exist in useradd before SLE 11 and did not
+ # automatically create a group
+ dist = distro.version()
+ major_release = int(dist.split('.')[0])
+ if major_release >= 12:
+ cmd.append('-N')
+ else:
+ cmd.append('-N')
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ if not self.local:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+            # If the specified path to the user home contains parent directories that
+            # do not exist and create_home is True, first create the parent directory,
+            # since useradd cannot create it.
+ if self.create_home:
+ parent = os.path.dirname(self.home)
+ if not os.path.isdir(parent):
+ self.create_homedir(self.home)
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.expires is not None and not self.local:
+ cmd.append('-e')
+ if self.expires < time.gmtime(0):
+ cmd.append('')
+ else:
+ cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
+ if self.password is not None:
+ cmd.append('-p')
+ if self.password_lock:
+ cmd.append('!%s' % self.password)
+ else:
+ cmd.append(self.password)
+
+ if self.create_home:
+ if not self.local:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+ else:
+ cmd.append('-M')
+
+ if self.system:
+ cmd.append('-r')
+
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+ if not self.local or rc != 0:
+ return (rc, out, err)
+
+ if self.expires is not None:
+ if self.expires < time.gmtime(0):
+ lexpires = -1
+ else:
+ # Convert seconds since Epoch to days since Epoch
+ lexpires = int(math.floor(self.module.params['expires'])) // 86400
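+                # e.g. expires=1422403387 -> 1422403387 // 86400 = 16463 days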
+ (rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ if self.groups is None or len(self.groups) == 0:
+ return (rc, out, err)
+
+ for add_group in groups:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+ return (rc, out, err)
+
+ def _check_usermod_append(self):
+ # check if this version of usermod can append groups
+
+ if self.local:
+ command_name = 'lusermod'
+ else:
+ command_name = 'usermod'
+
+ usermod_path = self.module.get_bin_path(command_name, True)
+
+        # for some reason, usermod --help cannot be used by non-root users
+        # on RH/Fedora, due to the lack of an execute bit for others
+ if not os.access(usermod_path, os.X_OK):
+ return False
+
+ cmd = [usermod_path, '--help']
+ (rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
+ helpout = data1 + data2
+
+ # check if --append exists
+ lines = to_native(helpout).split('\n')
+ for line in lines:
+ if line.strip().startswith('-a, --append'):
+ return True
+
+ return False
+
+ def modify_user_usermod(self):
+
+ if self.local:
+ command_name = 'lusermod'
+ lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
+ lgroupmod_add = set()
+ lgroupmod_del = set()
+ lchage_cmd = self.module.get_bin_path('lchage', True)
+ lexpires = None
+ else:
+ command_name = 'usermod'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+ info = self.user_info()
+ has_append = self._check_usermod_append()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(ginfo[2])
+
+ if self.groups is not None:
+ # get a list of all groups for the user, including the primary
+ current_groups = self.user_group_membership(exclude_primary=False)
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set(remove_existing=False)
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ if has_append:
+ cmd.append('-a')
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ if self.local:
+ if self.append:
+ lgroupmod_add = set(groups).difference(current_groups)
+ lgroupmod_del = set()
+ else:
+ lgroupmod_add = set(groups).difference(current_groups)
+ lgroupmod_del = set(current_groups).difference(groups)
+ else:
+ if self.append and not has_append:
+ cmd.append('-A')
+ cmd.append(','.join(group_diff))
+ else:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+ if self.move_home:
+ cmd.append('-m')
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.expires is not None:
+
+ current_expires = int(self.user_password()[1])
+
+ if self.expires < time.gmtime(0):
+ if current_expires >= 0:
+ if self.local:
+ lexpires = -1
+ else:
+ cmd.append('-e')
+ cmd.append('')
+ else:
+ # Convert days since Epoch to seconds since Epoch as struct_time
+ current_expire_date = time.gmtime(current_expires * 86400)
+
+ # Current expires is negative or we compare year, month, and day only
+ if current_expires < 0 or current_expire_date[:3] != self.expires[:3]:
+ if self.local:
+ # Convert seconds since Epoch to days since Epoch
+ lexpires = int(math.floor(self.module.params['expires'])) // 86400
+ else:
+ cmd.append('-e')
+ cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
+ # Lock if no password or unlocked, unlock only if locked
+ if self.password_lock and not info[1].startswith('!'):
+ cmd.append('-L')
+ elif self.password_lock is False and info[1].startswith('!'):
+            # usermod will refuse to unlock a user with no password; the module shows 'changed' regardless
+ cmd.append('-U')
+
+ if self.update_password == 'always' and self.password is not None and info[1].lstrip('!') != self.password.lstrip('!'):
+ # Remove options that are mutually exclusive with -p
+ cmd = [c for c in cmd if c not in ['-U', '-L']]
+ cmd.append('-p')
+ if self.password_lock:
+ # Lock the account and set the hash in a single command
+ cmd.append('!%s' % self.password)
+ else:
+ cmd.append(self.password)
+
+ (rc, out, err) = (None, '', '')
+
+ # skip if no usermod changes to be made
+ if len(cmd) > 1:
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ if not self.local or not (rc is None or rc == 0):
+ return (rc, out, err)
+
+ if lexpires is not None:
+ (rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ if len(lgroupmod_add) == 0 and len(lgroupmod_del) == 0:
+ return (rc, out, err)
+
+ for add_group in lgroupmod_add:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ for del_group in lgroupmod_del:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-m', self.name, del_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+ return (rc, out, err)
+
+ def group_exists(self, group):
+ try:
+ # Try group as a gid first
+ grp.getgrgid(int(group))
+ return True
+ except (ValueError, KeyError):
+ try:
+ grp.getgrnam(group)
+ return True
+ except KeyError:
+ return False
+
+ def group_info(self, group):
+ if not self.group_exists(group):
+ return False
+ try:
+ # Try group as a gid first
+ return list(grp.getgrgid(int(group)))
+ except (ValueError, KeyError):
+ return list(grp.getgrnam(group))
+
+ def get_groups_set(self, remove_existing=True):
+ if self.groups is None:
+ return None
+ info = self.user_info()
+ groups = set(x.strip() for x in self.groups.split(',') if x)
+ for g in groups.copy():
+ if not self.group_exists(g):
+ self.module.fail_json(msg="Group %s does not exist" % (g))
+ if info and remove_existing and self.group_info(g)[2] == info[3]:
+ groups.remove(g)
+ return groups
+
+ def user_group_membership(self, exclude_primary=True):
+ ''' Return a list of groups the user belongs to '''
+ groups = []
+ info = self.get_pwd_info()
+ for group in grp.getgrall():
+ if self.name in group.gr_mem:
+ # Exclude the user's primary group by default
+ if not exclude_primary:
+ groups.append(group[0])
+ else:
+ if info[3] != group.gr_gid:
+ groups.append(group[0])
+
+ return groups
+
+ def user_exists(self):
+ # The pwd module does not distinguish between local and directory accounts.
+        # Its output cannot be used to determine whether or not an account exists locally.
+ # It returns True if the account exists locally or in the directory, so instead
+ # look in the local PASSWORD file for an existing account.
+ if self.local:
+ if not os.path.exists(self.PASSWORDFILE):
+ self.module.fail_json(msg="'local: true' specified but unable to find local account file {0} to parse.".format(self.PASSWORDFILE))
+
+ exists = False
+ name_test = '{0}:'.format(self.name)
+ with open(self.PASSWORDFILE, 'rb') as f:
+ reversed_lines = f.readlines()[::-1]
+ for line in reversed_lines:
+ if line.startswith(to_bytes(name_test)):
+ exists = True
+ break
+
+ if not exists:
+ self.module.warn(
+ "'local: true' specified and user '{name}' was not found in {file}. "
+ "The local user account may already exist if the local account database exists "
+ "somewhere other than {file}.".format(file=self.PASSWORDFILE, name=self.name))
+
+ return exists
+
+ else:
+ try:
+ if pwd.getpwnam(self.name):
+ return True
+ except KeyError:
+ return False
+
+ def get_pwd_info(self):
+ if not self.user_exists():
+ return False
+ return list(pwd.getpwnam(self.name))
+
+ def user_info(self):
+ if not self.user_exists():
+ return False
+ info = self.get_pwd_info()
+ if len(info[1]) == 1 or len(info[1]) == 0:
+ info[1] = self.user_password()[0]
+ return info
+
+ def set_password_expire(self):
+ min_needs_change = self.password_expire_min is not None
+ max_needs_change = self.password_expire_max is not None
+
+ if HAVE_SPWD:
+ try:
+ shadow_info = getspnam(to_bytes(self.name))
+ except ValueError:
+ return None, '', ''
+
+ min_needs_change &= self.password_expire_min != shadow_info.sp_min
+ max_needs_change &= self.password_expire_max != shadow_info.sp_max
+
+ if not (min_needs_change or max_needs_change):
+ return (None, '', '') # target state already reached
+
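+        # Illustrative: with password_expire_min=7 and password_expire_max=90
+        # this builds "chage -m 7 -M 90 <name>".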
+ command_name = 'chage'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ if min_needs_change:
+ cmd.extend(["-m", self.password_expire_min])
+ if max_needs_change:
+ cmd.extend(["-M", self.password_expire_max])
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def user_password(self):
+ passwd = ''
+ expires = ''
+ if HAVE_SPWD:
+ try:
+ shadow_info = getspnam(to_bytes(self.name))
+ passwd = to_native(shadow_info.sp_pwdp)
+ expires = shadow_info.sp_expire
+ return passwd, expires
+ except ValueError:
+ return passwd, expires
+
+ if not self.user_exists():
+ return passwd, expires
+ elif self.SHADOWFILE:
+ passwd, expires = self.parse_shadow_file()
+
+ return passwd, expires
+
+ def parse_shadow_file(self):
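+        # On Linux-style systems a shadow entry is colon-separated:
+        #   name:passwd:lastchanged:min:max:warn:inactive:expire:reserved
+        # Platform subclasses override SHADOWFILE and SHADOWFILE_EXPIRE_INDEX
+        # when their shadow file uses a different layout.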
+ passwd = ''
+ expires = ''
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'r') as f:
+ for line in f:
+ if line.startswith('%s:' % self.name):
+ passwd = line.split(':')[1]
+ expires = line.split(':')[self.SHADOWFILE_EXPIRE_INDEX] or -1
+ return passwd, expires
+
+ def get_ssh_key_path(self):
+ info = self.user_info()
+ if os.path.isabs(self.ssh_file):
+ ssh_key_file = self.ssh_file
+ else:
+ if not os.path.exists(info[5]) and not self.module.check_mode:
+ raise Exception('User %s home directory does not exist' % self.name)
+ ssh_key_file = os.path.join(info[5], self.ssh_file)
+ return ssh_key_file
+
+ def ssh_key_gen(self):
+ info = self.user_info()
+ overwrite = None
+ try:
+ ssh_key_file = self.get_ssh_key_path()
+ except Exception as e:
+ return (1, '', to_native(e))
+ ssh_dir = os.path.dirname(ssh_key_file)
+ if not os.path.exists(ssh_dir):
+ if self.module.check_mode:
+ return (0, '', '')
+ try:
+ os.mkdir(ssh_dir, int('0700', 8))
+ os.chown(ssh_dir, info[2], info[3])
+ except OSError as e:
+ return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
+ if os.path.exists(ssh_key_file):
+ if self.force:
+ # ssh-keygen doesn't support overwriting the key interactively, so send 'y' to confirm
+ overwrite = 'y'
+ else:
+ return (None, 'Key already exists, use "force: yes" to overwrite', '')
+ cmd = [self.module.get_bin_path('ssh-keygen', True)]
+ cmd.append('-t')
+ cmd.append(self.ssh_type)
+ if self.ssh_bits > 0:
+ cmd.append('-b')
+ cmd.append(self.ssh_bits)
+ cmd.append('-C')
+ cmd.append(self.ssh_comment)
+ cmd.append('-f')
+ cmd.append(ssh_key_file)
+ if self.ssh_passphrase is not None:
+ if self.module.check_mode:
+ self.module.debug('In check mode, would have run: "%s"' % cmd)
+ return (0, '', '')
+
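+            # ssh-keygen insists on reading the passphrase from a terminal, so
+            # run it under pseudo-terminals and answer the prompts as they
+            # appear on stdout/stderr.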
+ master_in_fd, slave_in_fd = pty.openpty()
+ master_out_fd, slave_out_fd = pty.openpty()
+ master_err_fd, slave_err_fd = pty.openpty()
+ env = os.environ.copy()
+ env['LC_ALL'] = get_best_parsable_locale(self.module)
+ try:
+ p = subprocess.Popen([to_bytes(c) for c in cmd],
+ stdin=slave_in_fd,
+ stdout=slave_out_fd,
+ stderr=slave_err_fd,
+ preexec_fn=os.setsid,
+ env=env)
+ out_buffer = b''
+ err_buffer = b''
+ while p.poll() is None:
+ r_list = select.select([master_out_fd, master_err_fd], [], [], 1)[0]
+ first_prompt = b'Enter passphrase (empty for no passphrase):'
+ second_prompt = b'Enter same passphrase again'
+ prompt = first_prompt
+ for fd in r_list:
+ if fd == master_out_fd:
+ chunk = os.read(master_out_fd, 10240)
+ out_buffer += chunk
+ if prompt in out_buffer:
+ os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
+ prompt = second_prompt
+ else:
+ chunk = os.read(master_err_fd, 10240)
+ err_buffer += chunk
+ if prompt in err_buffer:
+ os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
+ prompt = second_prompt
+ if b'Overwrite (y/n)?' in out_buffer or b'Overwrite (y/n)?' in err_buffer:
+ # The key was created between us checking for existence and now
+ return (None, 'Key already exists', '')
+
+ rc = p.returncode
+ out = to_native(out_buffer)
+ err = to_native(err_buffer)
+ except OSError as e:
+ return (1, '', to_native(e))
+ else:
+ cmd.append('-N')
+ cmd.append('')
+
+ (rc, out, err) = self.execute_command(cmd, data=overwrite)
+
+ if rc == 0 and not self.module.check_mode:
+ # If the keys were successfully created, we should be able
+ # to tweak ownership.
+ os.chown(ssh_key_file, info[2], info[3])
+ os.chown('%s.pub' % ssh_key_file, info[2], info[3])
+ return (rc, out, err)
+
+ def ssh_key_fingerprint(self):
+ ssh_key_file = self.get_ssh_key_path()
+ if not os.path.exists(ssh_key_file):
+ return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
+ cmd = [self.module.get_bin_path('ssh-keygen', True)]
+ cmd.append('-l')
+ cmd.append('-f')
+ cmd.append(ssh_key_file)
+
+ return self.execute_command(cmd, obey_checkmode=False)
+
+ def get_ssh_public_key(self):
+ ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
+ try:
+ with open(ssh_public_key_file, 'r') as f:
+ ssh_public_key = f.read().strip()
+ except IOError:
+ return None
+ return ssh_public_key
+
+ def create_user(self):
+ # by default we use the create_user_useradd method
+ return self.create_user_useradd()
+
+ def remove_user(self):
+ # by default we use the remove_user_userdel method
+ return self.remove_user_userdel()
+
+ def modify_user(self):
+ # by default we use the modify_user_usermod method
+ return self.modify_user_usermod()
+
+ def create_homedir(self, path):
+ if not os.path.exists(path):
+ if self.skeleton is not None:
+ skeleton = self.skeleton
+ else:
+ skeleton = '/etc/skel'
+
+ if os.path.exists(skeleton):
+ try:
+ shutil.copytree(skeleton, path, symlinks=True)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ else:
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ # get umask from /etc/login.defs and set correct home mode
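+            # e.g. a UMASK of 022 in login.defs yields a home mode of 0755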
+ if os.path.exists(self.LOGIN_DEFS):
+ with open(self.LOGIN_DEFS, 'r') as f:
+ for line in f:
+ m = re.match(r'^UMASK\s+(\d+)$', line)
+ if m:
+ umask = int(m.group(1), 8)
+ mode = 0o777 & ~umask
+ try:
+ os.chmod(path, mode)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+
+ def chown_homedir(self, uid, gid, path):
+ try:
+ os.chown(path, uid, gid)
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ os.chown(os.path.join(root, d), uid, gid)
+ for f in files:
+ os.chown(os.path.join(root, f), uid, gid)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+
+
+# ===========================================
+
+class FreeBsdUser(User):
+ """
+ This is a FreeBSD User manipulation class - it uses the pw command
+ to manipulate the user database, followed by the chpass command
+ to change the password.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+ SHADOWFILE_EXPIRE_INDEX = 6
+ DATE_FORMAT = '%d-%b-%Y'
+
+ def _handle_lock(self):
+ info = self.user_info()
+ if self.password_lock and not info[1].startswith('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'lock',
+ self.name
+ ]
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+ return self.execute_command(cmd)
+ elif self.password_lock is False and info[1].startswith('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'unlock',
+ self.name
+ ]
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+ return self.execute_command(cmd)
+
+ return (None, '', '')
+
+ def remove_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'userdel',
+ '-n',
+ self.name
+ ]
+ if self.remove:
+ cmd.append('-r')
+
+ return self.execute_command(cmd)
+
+ def create_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'useradd',
+ '-n',
+ self.name,
+ ]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.expires is not None:
+ cmd.append('-e')
+ if self.expires < time.gmtime(0):
+ cmd.append('0')
+ else:
+ cmd.append(str(calendar.timegm(self.expires)))
+
+ # system cannot be handled currently - should we error if its requested?
+ # create the user
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # we have to set the password in a second command
+ if self.password is not None:
+ cmd = [
+ self.module.get_bin_path('chpass', True),
+ '-p',
+ self.password,
+ self.name
+ ]
+ _rc, _out, _err = self.execute_command(cmd)
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ # we have to lock/unlock the password in a distinct command
+ _rc, _out, _err = self._handle_lock()
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ return (rc, out, err)
+
+ def modify_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'usermod',
+ '-n',
+ self.name
+ ]
+ cmd_len = len(cmd)
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ if (info[5] != self.home and self.move_home) or (not os.path.exists(self.home) and self.create_home):
+ cmd.append('-m')
+ if info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ # find current login class
+ user_login_class = None
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'r') as f:
+ for line in f:
+ if line.startswith('%s:' % self.name):
+ user_login_class = line.split(':')[4]
+
+ # act only if login_class change
+ if self.login_class != user_login_class:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups = self.get_groups_set()
+
+ group_diff = set(current_groups).symmetric_difference(groups)
+ groups_need_mod = False
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups = groups | set(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.expires is not None:
+
+ current_expires = int(self.user_password()[1])
+
+ # If expiration is negative or zero and the current expiration is greater than zero, disable expiration.
+ # In OpenBSD, setting expiration to zero disables expiration. It does not expire the account.
+ if self.expires <= time.gmtime(0):
+ if current_expires > 0:
+ cmd.append('-e')
+ cmd.append('0')
+ else:
+ # Convert days since Epoch to seconds since Epoch as struct_time
+ current_expire_date = time.gmtime(current_expires)
+
+ # Current expires is negative or we compare year, month, and day only
+ if current_expires <= 0 or current_expire_date[:3] != self.expires[:3]:
+ cmd.append('-e')
+ cmd.append(str(calendar.timegm(self.expires)))
+
+ (rc, out, err) = (None, '', '')
+
+ # modify the user if cmd will do anything
+ if cmd_len != len(cmd):
+ (rc, _out, _err) = self.execute_command(cmd)
+ out += _out
+ err += _err
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # we have to set the password in a second command
+ if self.update_password == 'always' and self.password is not None and info[1].lstrip('*LOCKED*') != self.password.lstrip('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('chpass', True),
+ '-p',
+ self.password,
+ self.name
+ ]
+ _rc, _out, _err = self.execute_command(cmd)
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ # we have to lock/unlock the password in a distinct command
+ _rc, _out, _err = self._handle_lock()
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ return (rc, out, err)
+
+
+class DragonFlyBsdUser(FreeBsdUser):
+ """
+ This is a DragonFlyBSD User manipulation class - it inherits the
+ FreeBsdUser class behaviors, such as using the pw command to
+ manipulate the user database, followed by the chpass command
+ to change the password.
+ """
+
+ platform = 'DragonFly'
+
+
+class OpenBSDUser(User):
+ """
+    This is an OpenBSD User manipulation class.
+ Main differences are that OpenBSD:-
+ - has no concept of "system" account.
+ - has no force delete user
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password is not None and self.password != '*':
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user_userdel(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups_option = '-S'
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_option = '-G'
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append(groups_option)
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ # find current login class
+ user_login_class = None
+ userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
+ (rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
+
+ for line in out.splitlines():
+ tokens = line.split()
+
+ if tokens[0] == 'class' and len(tokens) == 2:
+ user_login_class = tokens[1]
+
+ # act only if login_class change
+ if self.login_class != user_login_class:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password_lock and not info[1].startswith('*'):
+ cmd.append('-Z')
+ elif self.password_lock is False and info[1].startswith('*'):
+ cmd.append('-U')
+
+ if self.update_password == 'always' and self.password is not None \
+ and self.password != '*' and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class NetBSDUser(User):
+ """
+ This is a NetBSD User manipulation class.
+ Main differences are that NetBSD:-
+ - has no concept of "system" account.
+ - has no force delete user
+
+
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ if len(groups) > 16:
+ self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user_userdel(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups = set(current_groups).union(groups)
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ if len(groups) > 16:
+ self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.password_lock and not info[1].startswith('*LOCKED*'):
+ cmd.append('-C yes')
+ elif self.password_lock is False and info[1].startswith('*LOCKED*'):
+ cmd.append('-C no')
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class SunOS(User):
+ """
+    This is a SunOS User manipulation class - the main difference between
+ this class and the generic user class is that Solaris-type distros
+ don't support the concept of a "system" account and we need to
+ edit the /etc/shadow file manually to set a password. (Ugh)
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - user_info()
+ """
+
+ platform = 'SunOS'
+ distribution = None
+ SHADOWFILE = '/etc/shadow'
+ USER_ATTR = '/etc/user_attr'
+
+ def get_password_defaults(self):
+ # Read password aging defaults
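+        # /etc/default/passwd uses KEY=value lines, for example (illustrative):
+        #   MINWEEKS=1
+        #   MAXWEEKS=13
+        #   WARNWEEKS=4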
+ try:
+ minweeks = ''
+ maxweeks = ''
+ warnweeks = ''
+ with open("/etc/default/passwd", 'r') as f:
+ for line in f:
+ line = line.strip()
+ if (line.startswith('#') or line == ''):
+ continue
+ m = re.match(r'^([^#]*)#(.*)$', line)
+ if m: # The line contains a hash / comment
+ line = m.group(1)
+ key, value = line.split('=')
+ if key == "MINWEEKS":
+ minweeks = value.rstrip('\n')
+ elif key == "MAXWEEKS":
+ maxweeks = value.rstrip('\n')
+ elif key == "WARNWEEKS":
+ warnweeks = value.rstrip('\n')
+ except Exception as err:
+ self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
+
+ return (minweeks, maxweeks, warnweeks)
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.profile is not None:
+ cmd.append('-P')
+ cmd.append(self.profile)
+
+ if self.authorization is not None:
+ cmd.append('-A')
+ cmd.append(self.authorization)
+
+ if self.role is not None:
+ cmd.append('-R')
+ cmd.append(self.role)
+
+ cmd.append(self.name)
+
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ if not self.module.check_mode:
+ # we have to set the password by editing the /etc/shadow file
+ if self.password is not None:
+ self.backup_shadow()
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
+ try:
+ lines = []
+ with open(self.SHADOWFILE, 'rb') as f:
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ fields = line.strip().split(':')
+ if not fields[0] == self.name:
+ lines.append(line)
+ continue
+ fields[1] = self.password
+ fields[2] = str(int(time.time() // 86400))
+ if minweeks:
+ try:
+ fields[3] = str(int(minweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ if maxweeks:
+ try:
+ fields[4] = str(int(maxweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ if warnweeks:
+ try:
+ fields[5] = str(int(warnweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ line = ':'.join(fields)
+ lines.append('%s\n' % line)
+ with open(self.SHADOWFILE, 'w+') as f:
+ f.writelines(lines)
+ except Exception as err:
+ self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
+
+ return (rc, out, err)
+
+ def modify_user_usermod(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ cmd_len = len(cmd)
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+ groups_need_mod = False
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups.update(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.profile is not None and info[7] != self.profile:
+ cmd.append('-P')
+ cmd.append(self.profile)
+
+ if self.authorization is not None and info[8] != self.authorization:
+ cmd.append('-A')
+ cmd.append(self.authorization)
+
+ if self.role is not None and info[9] != self.role:
+ cmd.append('-R')
+ cmd.append(self.role)
+
+ # modify the user if cmd will do anything
+ if cmd_len != len(cmd):
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+ else:
+ (rc, out, err) = (None, '', '')
+
+ # we have to set the password by editing the /etc/shadow file
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ self.backup_shadow()
+ (rc, out, err) = (0, '', '')
+ if not self.module.check_mode:
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
+ try:
+ lines = []
+ with open(self.SHADOWFILE, 'rb') as f:
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ fields = line.strip().split(':')
+ if not fields[0] == self.name:
+ lines.append(line)
+ continue
+ fields[1] = self.password
+ fields[2] = str(int(time.time() // 86400))
+ if minweeks:
+ fields[3] = str(int(minweeks) * 7)
+ if maxweeks:
+ fields[4] = str(int(maxweeks) * 7)
+ if warnweeks:
+ fields[5] = str(int(warnweeks) * 7)
+ line = ':'.join(fields)
+ lines.append('%s\n' % line)
+ with open(self.SHADOWFILE, 'w+') as f:
+ f.writelines(lines)
+ rc = 0
+ except Exception as err:
+ self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
+
+ return (rc, out, err)
+
+ def user_info(self):
+ info = super(SunOS, self).user_info()
+ if info:
+ info += self._user_attr_info()
+ return info
+
+ def _user_attr_info(self):
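+        # An /etc/user_attr entry looks like (illustrative):
+        #   alice::::type=normal;profiles=System Administrator;roles=operator
+        # i.e. the name and the attributes are separated by four colons, with
+        # the attributes given as semicolon-separated key=value pairs.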
+ info = [''] * 3
+ with open(self.USER_ATTR, 'r') as file_handler:
+ for line in file_handler:
+ lines = line.strip().split('::::')
+ if lines[0] == self.name:
+ tmp = dict(x.split('=') for x in lines[1].split(';'))
+ info[0] = tmp.get('profiles', '')
+ info[1] = tmp.get('auths', '')
+ info[2] = tmp.get('roles', '')
+ return info
+
+
+class DarwinUser(User):
+ """
+ This is a Darwin macOS User manipulation class.
+ Main differences are that Darwin:-
+ - Handles accounts in a database managed by dscl(1)
+ - Has no useradd/groupadd
+ - Does not create home directories
+ - User password must be cleartext
+ - UID must be given
+    - System user UIDs must be under 500
+
+ This overrides the following methods from the generic class:-
+ - user_exists()
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+ platform = 'Darwin'
+ distribution = None
+ SHADOWFILE = None
+
+ dscl_directory = '.'
+
+ fields = [
+ ('comment', 'RealName'),
+ ('home', 'NFSHomeDirectory'),
+ ('shell', 'UserShell'),
+ ('uid', 'UniqueID'),
+ ('group', 'PrimaryGroupID'),
+ ('hidden', 'IsHidden'),
+ ]
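+    # Each (module_param, dscl_attribute) pair above is written with a command
+    # of the form (illustrative):
+    #   dscl . -create /Users/alice RealName "Alice Example"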
+
+ def __init__(self, module):
+
+ super(DarwinUser, self).__init__(module)
+
+        # make the user hidden if the option is set, or defer to the system option
+ if self.hidden is None:
+ if self.system:
+ self.hidden = 1
+ elif self.hidden:
+ self.hidden = 1
+ else:
+ self.hidden = 0
+
+        # add hidden to processing if set and not already tracked in fields
+        if self.hidden is not None and ('hidden', 'IsHidden') not in self.fields:
+            self.fields.append(('hidden', 'IsHidden'))
+
+ def _get_dscl(self):
+ return [self.module.get_bin_path('dscl', True), self.dscl_directory]
+
+ def _list_user_groups(self):
+ cmd = self._get_dscl()
+ cmd += ['-search', '/Groups', 'GroupMembership', self.name]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ groups = []
+ for line in out.splitlines():
+ if line.startswith(' ') or line.startswith(')'):
+ continue
+ groups.append(line.split()[0])
+ return groups
+
+ def _get_user_property(self, property):
+        '''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
+ cmd = self._get_dscl()
+ cmd += ['-read', '/Users/%s' % self.name, property]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ if rc != 0:
+ return None
+ # from dscl(1)
+ # if property contains embedded spaces, the list will instead be
+ # displayed one entry per line, starting on the line after the key.
+ lines = out.splitlines()
+ # sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
+ if len(lines) == 1:
+ return lines[0].split(': ')[1]
+ if len(lines) > 2:
+ return '\n'.join([lines[1].strip()] + lines[2:])
+ if len(lines) == 2:
+ return lines[1].strip()
+ return None
+
+ def _get_next_uid(self, system=None):
+ '''
+ Return the next available uid. If system=True, then
+        uid should be below 500, if possible.
+ '''
+ cmd = self._get_dscl()
+ cmd += ['-list', '/Users', 'UniqueID']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ if rc != 0:
+ self.module.fail_json(
+ msg="Unable to get the next available uid",
+ rc=rc,
+ out=out,
+ err=err
+ )
+
+ max_uid = 0
+ max_system_uid = 0
+ for line in out.splitlines():
+ current_uid = int(line.split(' ')[-1])
+ if max_uid < current_uid:
+ max_uid = current_uid
+ if max_system_uid < current_uid and current_uid < 500:
+ max_system_uid = current_uid
+
+ if system and (0 < max_system_uid < 499):
+ return max_system_uid + 1
+ return max_uid + 1
+
+ def _change_user_password(self):
+        '''Change the password for SELF.NAME to SELF.PASSWORD.
+
+        Please note that the password must be cleartext.
+ '''
+        # some documentation on how passwords are stored on OSX:
+ # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
+ # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
+ # http://pastebin.com/RYqxi7Ca
+ # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
+ # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
+ # https://gist.github.com/nueh/8252572
+ cmd = self._get_dscl()
+ if self.password:
+ cmd += ['-passwd', '/Users/%s' % self.name, self.password]
+ else:
+ cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
+ return (rc, out, err)
+
+ def _make_group_numerical(self):
+        '''Convert SELF.GROUP to its numerical value, as a string suitable for dscl.'''
+ if self.group is None:
+ self.group = 'nogroup'
+ try:
+ self.group = grp.getgrnam(self.group).gr_gid
+ except KeyError:
+ self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
+ # We need to pass a string to dscl
+ self.group = str(self.group)
+
+ def __modify_group(self, group, action):
+ '''Add or remove SELF.NAME to or from GROUP depending on ACTION.
+ ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
+ if action == 'add':
+ option = '-a'
+ else:
+ option = '-d'
+ cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
+ % (action, self.name, group), err=err, out=out, rc=rc)
+ return (rc, out, err)
+
+ def _modify_group(self):
+        '''Synchronize SELF.NAME's group membership with SELF.GROUPS,
+        honoring SELF.APPEND. Returns (rc, out, err, changed).'''
+
+ rc = 0
+ out = ''
+ err = ''
+ changed = False
+
+ current = set(self._list_user_groups())
+ if self.groups is not None:
+ target = set(self.groups.split(','))
+ else:
+ target = set([])
+
+ if self.append is False:
+ for remove in current - target:
+ (_rc, _out, _err) = self.__modify_group(remove, 'delete')
+                rc += _rc
+ out += _out
+ err += _err
+ changed = True
+
+ for add in target - current:
+ (_rc, _out, _err) = self.__modify_group(add, 'add')
+ rc += _rc
+ out += _out
+ err += _err
+ changed = True
+
+ return (rc, out, err, changed)
+
+ def _update_system_user(self):
+        '''Hide or show the user on the login window according to SELF.SYSTEM.
+
+ Returns 0 if a change has been made, None otherwise.'''
+
+ plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
+
+ # http://support.apple.com/kb/HT5017?viewlocale=en_US
+ cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ # returned value is
+ # (
+ # "_userA",
+ # "_UserB",
+ # userc
+ # )
+ hidden_users = []
+ for x in out.splitlines()[1:-1]:
+ try:
+ x = x.split('"')[1]
+ except IndexError:
+ x = x.strip()
+ hidden_users.append(x)
+
+ if self.system:
+ if self.name not in hidden_users:
+ cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+                    self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
+ return 0
+ else:
+ if self.name in hidden_users:
+ del (hidden_users[hidden_users.index(self.name)])
+
+ cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
+ return 0
+
+ def user_exists(self):
+        '''Check if SELF.NAME is a known user on the system.'''
+ cmd = self._get_dscl()
+ cmd += ['-read', '/Users/%s' % self.name, 'UniqueID']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ return rc == 0
+
+ def remove_user(self):
+ '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
+ info = self.user_info()
+
+ cmd = self._get_dscl()
+ cmd += ['-delete', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
+
+ if self.force:
+ if os.path.exists(info[5]):
+ shutil.rmtree(info[5])
+ out += "Removed %s" % info[5]
+
+ return (rc, out, err)
+
+ def create_user(self, command_name='dscl'):
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
+
+ self._make_group_numerical()
+ if self.uid is None:
+ self.uid = str(self._get_next_uid(self.system))
+
+ # Homedir is not created by default
+ if self.create_home:
+ if self.home is None:
+ self.home = '/Users/%s' % self.name
+ if not self.module.check_mode:
+ if not os.path.exists(self.home):
+ os.makedirs(self.home)
+ self.chown_homedir(int(self.uid), int(self.group), self.home)
+
+ # dscl sets shell to /usr/bin/false when UserShell is not specified
+ # so set the shell to /bin/bash when the user is not a system user
+ if not self.system and self.shell is None:
+ self.shell = '/bin/bash'
+
+ for field in self.fields:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
+
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
+ (rc, _out, _err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
+
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, _out, _err)
+
+ (rc, _out, _err) = self._change_user_password()
+ out += _out
+ err += _err
+
+ self._update_system_user()
+ # here we don't care about change status since it is a creation,
+ # thus changed is always true.
+ if self.groups:
+ (rc, _out, _err, changed) = self._modify_group()
+ out += _out
+ err += _err
+ return (rc, out, err)
+
+ def modify_user(self):
+ changed = None
+ out = ''
+ err = ''
+
+ if self.group:
+ self._make_group_numerical()
+
+ for field in self.fields:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
+ current = self._get_user_property(field[1])
+ if current is None or current != to_text(self.__dict__[field[0]]):
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
+ (rc, _out, _err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(
+ msg='Cannot update property "%s" for user "%s".'
+ % (field[0], self.name), err=err, out=out, rc=rc)
+ changed = rc
+ out += _out
+ err += _err
+ if self.update_password == 'always' and self.password is not None:
+ (rc, _out, _err) = self._change_user_password()
+ out += _out
+ err += _err
+ changed = rc
+
+ if self.groups:
+ (rc, _out, _err, _changed) = self._modify_group()
+ out += _out
+ err += _err
+
+ if _changed is True:
+ changed = rc
+
+ rc = self._update_system_user()
+ if rc == 0:
+ changed = rc
+
+ return (changed, out, err)
+
+
+class AIX(User):
+ """
+    This is an AIX User manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - parse_shadow_file()
+ """
+
+ platform = 'AIX'
+ distribution = None
+ SHADOWFILE = '/etc/security/passwd'
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user_useradd(self, command_name='useradd'):
+ cmd = [self.module.get_bin_path(command_name, True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ # set password with chpasswd
+ if self.password is not None:
+ cmd = []
+ cmd.append(self.module.get_bin_path('chpasswd', True))
+ cmd.append('-e')
+ cmd.append('-c')
+ self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
+
+ return (rc, out, err)
+
+ def modify_user_usermod(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ (rc, out, err) = (None, '', '')
+ else:
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ # set password with chpasswd
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = []
+ cmd.append(self.module.get_bin_path('chpasswd', True))
+ cmd.append('-e')
+ cmd.append('-c')
+ (rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
+ else:
+ (rc2, out2, err2) = (None, '', '')
+
+ if rc is not None:
+ return (rc, out + out2, err + err2)
+ else:
+ return (rc2, out + out2, err + err2)
+
+ def parse_shadow_file(self):
+ """Example AIX shadowfile data:
+ nobody:
+ password = *
+
+ operator1:
+ password = {ssha512}06$xxxxxxxxxxxx....
+ lastupdate = 1549558094
+
+ test1:
+ password = *
+ lastupdate = 1553695126
+
+ """
+
+ b_name = to_bytes(self.name)
+ b_passwd = b''
+ b_expires = b''
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'rb') as bf:
+ b_lines = bf.readlines()
+
+ b_passwd_line = b''
+ b_expires_line = b''
+ try:
+ for index, b_line in enumerate(b_lines):
+ # Get password and lastupdate lines which come after the username
+ if b_line.startswith(b'%s:' % b_name):
+ b_passwd_line = b_lines[index + 1]
+ b_expires_line = b_lines[index + 2]
+ break
+
+ # Sanity check the lines because sometimes both are not present
+ if b' = ' in b_passwd_line:
+ b_passwd = b_passwd_line.split(b' = ', 1)[-1].strip()
+
+ if b' = ' in b_expires_line:
+ b_expires = b_expires_line.split(b' = ', 1)[-1].strip()
+
+ except IndexError:
+ self.module.fail_json(msg='Failed to parse shadow file %s' % self.SHADOWFILE)
+
+ passwd = to_native(b_passwd)
+ expires = to_native(b_expires) or -1
+ return passwd, expires
+
+
+class HPUX(User):
+ """
+    This is an HP-UX User manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'HP-UX'
+ distribution = None
+ SHADOWFILE = '/etc/shadow'
+
+ def create_user(self):
+ cmd = ['/usr/sam/lbin/useradd.sam']
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+ else:
+ cmd.append('-M')
+
+ if self.system:
+ cmd.append('-r')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user(self):
+ cmd = ['/usr/sam/lbin/userdel.sam']
+ if self.force:
+ cmd.append('-F')
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = ['/usr/sam/lbin/usermod.sam']
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set(remove_existing=False)
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups = groups | set(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+ if self.move_home:
+ cmd.append('-m')
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-F')
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class BusyBox(User):
+ """
+ This is the BusyBox class for use on systems that have adduser, deluser,
+ and delgroup commands. It overrides the following methods:
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('adduser', True)]
+
+ cmd.append('-D')
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg='Group {0} does not exist'.format(self.group))
+ cmd.append('-G')
+ cmd.append(self.group)
+
+ if self.comment is not None:
+ cmd.append('-g')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-h')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if not self.create_home:
+ cmd.append('-H')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.system:
+ cmd.append('-S')
+
+ cmd.append(self.name)
+
+ rc, out, err = self.execute_command(cmd)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ if self.password is not None:
+ cmd = [self.module.get_bin_path('chpasswd', True)]
+ cmd.append('--encrypted')
+ data = '{name}:{password}'.format(name=self.name, password=self.password)
+ rc, out, err = self.execute_command(cmd, data=data)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # Add to additional groups
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ add_cmd_bin = self.module.get_bin_path('adduser', True)
+ for group in groups:
+ cmd = [add_cmd_bin, self.name, group]
+ rc, out, err = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ return rc, out, err
+
+ def remove_user(self):
+
+ cmd = [
+ self.module.get_bin_path('deluser', True),
+ self.name
+ ]
+
+ if self.remove:
+ cmd.append('--remove-home')
+
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ current_groups = self.user_group_membership()
+ groups = []
+ rc = None
+ out = ''
+ err = ''
+ info = self.user_info()
+ add_cmd_bin = self.module.get_bin_path('adduser', True)
+ remove_cmd_bin = self.module.get_bin_path('delgroup', True)
+
+ # Manage group membership
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ for g in groups:
+ if g in group_diff:
+ add_cmd = [add_cmd_bin, self.name, g]
+ rc, out, err = self.execute_command(add_cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ for g in group_diff:
+ if g not in groups and not self.append:
+ remove_cmd = [remove_cmd_bin, self.name, g]
+ rc, out, err = self.execute_command(remove_cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # Manage password
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = [self.module.get_bin_path('chpasswd', True)]
+ cmd.append('--encrypted')
+ data = '{name}:{password}'.format(name=self.name, password=self.password)
+ rc, out, err = self.execute_command(cmd, data=data)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ return rc, out, err
+
+
+class Alpine(BusyBox):
+ """
+ This is the Alpine User manipulation class. It inherits the BusyBox class
+ behaviors such as using adduser and deluser commands.
+ """
+ platform = 'Linux'
+ distribution = 'Alpine'
+
+
+def main():
+ ssh_defaults = dict(
+ bits=0,
+ type='rsa',
+ passphrase=None,
+ comment='ansible-generated on %s' % socket.gethostname()
+ )
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True, aliases=['user']),
+ uid=dict(type='int'),
+ non_unique=dict(type='bool', default=False),
+ group=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str'),
+ home=dict(type='path'),
+ shell=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ login_class=dict(type='str'),
+ password_expire_max=dict(type='int', no_log=False),
+ password_expire_min=dict(type='int', no_log=False),
+ # following options are specific to macOS
+ hidden=dict(type='bool'),
+ # following options are specific to selinux
+ seuser=dict(type='str'),
+ # following options are specific to userdel
+ force=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ # following options are specific to useradd
+ create_home=dict(type='bool', default=True, aliases=['createhome']),
+ skeleton=dict(type='str'),
+ system=dict(type='bool', default=False),
+ # following options are specific to usermod
+ move_home=dict(type='bool', default=False),
+ append=dict(type='bool', default=False),
+ # following are specific to ssh key generation
+ generate_ssh_key=dict(type='bool'),
+ ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
+ ssh_key_type=dict(type='str', default=ssh_defaults['type']),
+ ssh_key_file=dict(type='path'),
+ ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
+ ssh_key_passphrase=dict(type='str', no_log=True),
+ update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
+ expires=dict(type='float'),
+ password_lock=dict(type='bool', no_log=False),
+ local=dict(type='bool'),
+ profile=dict(type='str'),
+ authorization=dict(type='str'),
+ role=dict(type='str'),
+ umask=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ user = User(module)
+ user.check_password_encrypted()
+
+ module.debug('User instantiated - platform %s' % user.platform)
+ if user.distribution:
+ module.debug('User instantiated - distribution %s' % user.distribution)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = user.name
+ result['state'] = user.state
+ if user.state == 'absent':
+ if user.user_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = user.remove_user()
+ if rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ result['force'] = user.force
+ result['remove'] = user.remove
+ elif user.state == 'present':
+ if not user.user_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Check to see if the provided home path contains parent directories
+ # that do not exist.
+ path_needs_parents = False
+ if user.home and user.create_home:
+ parent = os.path.dirname(user.home)
+ if not os.path.isdir(parent):
+ path_needs_parents = True
+
+ (rc, out, err) = user.create_user()
+
+ # If the home path had parent directories that needed to be created,
+ # make sure file permissions are correct in the created home directory.
+ if path_needs_parents:
+ info = user.user_info()
+ if info is not False:
+ user.chown_homedir(info[2], info[3], user.home)
+
+            result['system'] = user.system
+ result['create_home'] = user.create_home
+ else:
+ # modify user (note: this function is check mode aware)
+ (rc, out, err) = user.modify_user()
+ result['append'] = user.append
+ result['move_home'] = user.move_home
+ if rc is not None and rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ if user.password is not None:
+ result['password'] = 'NOT_LOGGING_PASSWORD'
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ if user.user_exists() and user.state == 'present':
+ info = user.user_info()
+        if info is False:
+            result['msg'] = "failed to look up user name: %s" % user.name
+            result['failed'] = True
+            # fail now rather than index the failed lookup below
+            module.fail_json(**result)
+ result['uid'] = info[2]
+ result['group'] = info[3]
+ result['comment'] = info[4]
+ result['home'] = info[5]
+ result['shell'] = info[6]
+ if user.groups is not None:
+ result['groups'] = user.groups
+
+ # handle missing homedirs
+ info = user.user_info()
+ if user.home is None:
+ user.home = info[5]
+ if not os.path.exists(user.home) and user.create_home:
+ if not module.check_mode:
+ user.create_homedir(user.home)
+ user.chown_homedir(info[2], info[3], user.home)
+ result['changed'] = True
+
+ # deal with ssh key
+ if user.sshkeygen:
+ # generate ssh key (note: this function is check mode aware)
+ (rc, out, err) = user.ssh_key_gen()
+ if rc is not None and rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ if rc == 0:
+ result['changed'] = True
+ (rc, out, err) = user.ssh_key_fingerprint()
+ if rc == 0:
+ result['ssh_fingerprint'] = out.strip()
+ else:
+ result['ssh_fingerprint'] = err.strip()
+ result['ssh_key_file'] = user.get_ssh_key_path()
+ result['ssh_public_key'] = user.get_ssh_public_key()
+
+ (rc, out, err) = user.set_password_expire()
+ if rc is None:
+ pass # target state reached, nothing to do
+ else:
+ if rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/validate_argument_spec.py b/lib/ansible/modules/validate_argument_spec.py
new file mode 100644
index 0000000..e223c94
--- /dev/null
+++ b/lib/ansible/modules/validate_argument_spec.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: validate_argument_spec
+short_description: Validate role argument specs.
+description:
+ - This module validates role arguments with a defined argument specification.
+version_added: "2.11"
+options:
+ argument_spec:
+ description:
+      - A dictionary like the AnsibleModule argument_spec.
+ required: true
+ provided_arguments:
+ description:
+      - A dictionary of the arguments that will be validated according to I(argument_spec).
+author:
+ - Ansible Core Team
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: none
+ connection:
+ support: none
+ check_mode:
+ support: full
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+'''
+
+EXAMPLES = r'''
+- name: verify vars needed for this task file are present when included
+ ansible.builtin.validate_argument_spec:
+ argument_spec: '{{required_data}}'
+ vars:
+ required_data:
+ # unlike spec file, just put the options in directly
+ stuff:
+ description: stuff
+ type: str
+ choices: ['who', 'knows', 'what']
+ default: what
+ but:
+ description: i guess we need one
+ type: str
+ required: true
+
+
+- name: verify vars needed for this task file are present when included, with spec from a spec file
+ ansible.builtin.validate_argument_spec:
+ argument_spec: "{{lookup('ansible.builtin.file', 'myargspec.yml')['specname']['options']}}"
+
+
+- name: verify vars needed for next include and not from inside it, also with params I'll only define there
+  block:
+    - ansible.builtin.validate_argument_spec:
+        argument_spec: "{{lookup('ansible.builtin.file', 'nakedoptions.yml')}}"
+        provided_arguments:
+          but: "that I can define on the include itself, like in its C(vars:) keyword"
+
+    - name: the include itself
+      ansible.builtin.include_tasks: sometasks.yml  # illustrative file name
+      vars:
+        stuff: knows
+        but: nobuts!
+'''
+
+RETURN = r'''
+argument_errors:
+ description: A list of arg validation errors.
+ returned: failure
+ type: list
+ elements: str
+ sample:
+ - "error message 1"
+ - "error message 2"
+
+argument_spec_data:
+ description: A dict of the data from the 'argument_spec' arg.
+ returned: failure
+ type: dict
+ sample:
+ some_arg:
+ type: "str"
+ some_other_arg:
+ type: "int"
+ required: true
+
+validate_args_context:
+  description: A dict of info about where validate_argument_spec was used.
+ type: dict
+ returned: always
+ sample:
+ name: my_role
+ type: role
+ path: /home/user/roles/my_role/
+ argument_spec_name: main
+'''
diff --git a/lib/ansible/modules/wait_for.py b/lib/ansible/modules/wait_for.py
new file mode 100644
index 0000000..ada2e80
--- /dev/null
+++ b/lib/ansible/modules/wait_for.py
@@ -0,0 +1,689 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wait_for
+short_description: Waits for a condition before continuing
+description:
+  - You can wait for a set amount of time C(timeout); this is the default if nothing else is specified or if just C(timeout) is specified.
+    This does not produce an error.
+  - Waiting for a port to become available is useful when services are not immediately available after their init scripts return,
+    which is true of certain Java application servers.
+  - It is also useful when starting guests with the M(community.libvirt.virt) module and needing to pause until they are ready.
+  - This module can also be used to wait for a regex match on a string to be present in a file.
+ - In Ansible 1.6 and later, this module can also be used to wait for a file to be available or
+ absent on the filesystem.
+ - In Ansible 1.8 and later, this module can also be used to wait for active connections to be closed before continuing, useful if a node
+ is being rotated out of a load balancer pool.
+ - For Windows targets, use the M(ansible.windows.win_wait_for) module instead.
+version_added: "0.7"
+options:
+ host:
+ description:
+ - A resolvable hostname or IP address to wait for.
+ type: str
+ default: 127.0.0.1
+ timeout:
+ description:
+      - Maximum number of seconds to wait for; when used with another condition it will force an error.
+      - When used without other conditions it is equivalent to just sleeping.
+ type: int
+ default: 300
+ connect_timeout:
+ description:
+ - Maximum number of seconds to wait for a connection to happen before closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - Number of seconds to wait before starting to poll.
+ type: int
+ default: 0
+ port:
+ description:
+ - Port number to poll.
+ - C(path) and C(port) are mutually exclusive parameters.
+ type: int
+ active_connection_states:
+ description:
+ - The list of TCP connection states which are counted as active connections.
+ type: list
+ elements: str
+ default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]
+ version_added: "2.3"
+ state:
+ description:
+      - Either C(present), C(started), C(stopped), C(absent), or C(drained).
+      - When checking a port, C(started) will ensure the port is open, C(stopped) will check that it is closed, and C(drained) will check for active connections.
+      - When checking for a file or a search string, C(present) or C(started) will ensure that the file or string is present before continuing,
+        C(absent) will check that the file is absent or removed.
+ type: str
+ choices: [ absent, drained, present, started, stopped ]
+ default: started
+ path:
+ description:
+ - Path to a file on the filesystem that must exist before continuing.
+ - C(path) and C(port) are mutually exclusive parameters.
+ type: path
+ version_added: "1.4"
+ search_regex:
+ description:
+ - Can be used to match a string in either a file or a socket connection.
+ - Defaults to a multiline regex.
+ type: str
+ version_added: "1.4"
+ exclude_hosts:
+ description:
+ - List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.
+ type: list
+ elements: str
+ version_added: "1.8"
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ - Before Ansible 2.3 this was hardcoded to 1 second.
+ type: int
+ default: 1
+ version_added: "2.3"
+ msg:
+ description:
+ - This overrides the normal error message from a failure to meet the required conditions.
+ type: str
+ version_added: "2.4"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - The ability to use search_regex with a port connection was added in Ansible 1.7.
+ - Prior to Ansible 2.4, testing for the absence of a directory or UNIX socket did not work correctly.
+ - Prior to Ansible 2.4, testing for the presence of a file did not work correctly if the remote user did not have read access to that file.
+  - Under some circumstances when using mandatory access control, a path that exists but can be neither modified nor created by the
+    remote user may always be treated as being absent.
+ - When waiting for a path, symbolic links will be followed. Many other modules that manipulate files do not follow symbolic links,
+ so operations on the path using other modules may not work exactly as expected.
+seealso:
+- module: ansible.builtin.wait_for_connection
+- module: ansible.windows.win_wait_for
+- module: community.windows.win_wait_for_process
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - John Jarvis (@jarv)
+ - Andrii Radyk (@AnderEnder)
+'''
+
+EXAMPLES = r'''
+- name: Sleep for 300 seconds and continue with play
+ ansible.builtin.wait_for:
+ timeout: 300
+ delegate_to: localhost
+
+- name: Wait for port 8000 to become open on the host, don't start checking for 10 seconds
+ ansible.builtin.wait_for:
+ port: 8000
+ delay: 10
+
+- name: Wait for port 8000 of any IP to close active connections, don't start checking for 10 seconds
+ ansible.builtin.wait_for:
+ host: 0.0.0.0
+ port: 8000
+ delay: 10
+ state: drained
+
+- name: Wait for port 8000 of any IP to close active connections, ignoring connections for specified hosts
+ ansible.builtin.wait_for:
+ host: 0.0.0.0
+ port: 8000
+ state: drained
+ exclude_hosts: 10.2.1.2,10.2.1.3
+
+- name: Wait until the file /tmp/foo is present before continuing
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+
+- name: Wait until the string "completed" is in the file /tmp/foo before continuing
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+ search_regex: completed
+
+- name: Wait until regex pattern matches in the file /tmp/foo and print the matched group
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+ search_regex: completed (?P<task>\w+)
+ register: waitfor
+- ansible.builtin.debug:
+ msg: Completed {{ waitfor['match_groupdict']['task'] }}
+
+- name: Wait until the lock file is removed
+ ansible.builtin.wait_for:
+ path: /var/lock/file.lock
+ state: absent
+
+- name: Wait until the process is finished and pid was destroyed
+ ansible.builtin.wait_for:
+ path: /proc/3466/status
+ state: absent
+
+- name: Output customized message when failed
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+ state: present
+ msg: Timeout to find file /tmp/foo
+
+# Do not assume the inventory_hostname is resolvable and delay 10 seconds at start
+- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
+ ansible.builtin.wait_for:
+ port: 22
+ host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ search_regex: OpenSSH
+ delay: 10
+ connection: local
+
+# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'
+- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
+ ansible.builtin.wait_for:
+ port: 22
+ host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ search_regex: OpenSSH
+ delay: 10
+ vars:
+ ansible_connection: local
+'''
+
+RETURN = r'''
+elapsed:
+ description: The number of seconds that elapsed while waiting
+ returned: always
+ type: int
+ sample: 23
+match_groups:
+  description: Tuple containing all the subgroups of the match as returned by U(https://docs.python.org/3/library/re.html#re.Match.groups)
+ returned: always
+ type: list
+ sample: ['match 1', 'match 2']
+match_groupdict:
+ description: Dictionary containing all the named subgroups of the match, keyed by the subgroup name,
+    as returned by U(https://docs.python.org/3/library/re.html#re.Match.groupdict)
+ returned: always
+ type: dict
+ sample:
+ {
+ 'group': 'match'
+ }
+'''
+
+import binascii
+import contextlib
+import datetime
+import errno
+import math
+import mmap
+import os
+import re
+import select
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils._text import to_bytes
+
+
+HAS_PSUTIL = False
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ HAS_PSUTIL = True
+ # just because we can import it on Linux doesn't mean we will use it
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+
+
+class TCPConnectionInfo(object):
+ """
+ This is a generic TCP Connection Info strategy class that relies
+ on the psutil module, which is not ideal for targets, but necessary
+ for cross platform support.
+
+ A subclass may wish to override some or all of these methods.
+ - _get_exclude_ips()
+    - get_active_connections_count()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+ platform = 'Generic'
+ distribution = None
+
+ match_all_ips = {
+ socket.AF_INET: '0.0.0.0',
+ socket.AF_INET6: '::',
+ }
+ ipv4_mapped_ipv6_address = {
+ 'prefix': '::ffff',
+ 'match_all': '::ffff:0.0.0.0'
+ }
+
+ def __new__(cls, *args, **kwargs):
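+        # get_platform_subclass() returns the most specific registered subclass
+        # for the detected platform/distribution (e.g. LinuxTCPConnectionInfo on
+        # Linux), falling back to this generic psutil-based implementation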
+ new_cls = get_platform_subclass(TCPConnectionInfo)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.ips = _convert_host_to_ip(module.params['host'])
+ self.port = int(self.module.params['port'])
+ self.exclude_ips = self._get_exclude_ips()
+ if not HAS_PSUTIL:
+ module.fail_json(msg=missing_required_lib('psutil'), exception=PSUTIL_IMP_ERR)
+
+ def _get_exclude_ips(self):
+ exclude_hosts = self.module.params['exclude_hosts']
+ exclude_ips = []
+ if exclude_hosts is not None:
+ for host in exclude_hosts:
+ exclude_ips.extend(_convert_host_to_ip(host))
+ return exclude_ips
+
+ def get_active_connections_count(self):
+ active_connections = 0
+ for p in psutil.process_iter():
+ try:
+ if hasattr(p, 'get_connections'):
+ connections = p.get_connections(kind='inet')
+ else:
+ connections = p.connections(kind='inet')
+ except psutil.Error:
+ # Process is Zombie or other error state
+ continue
+ for conn in connections:
+ if conn.status not in self.module.params['active_connection_states']:
+ continue
+ if hasattr(conn, 'local_address'):
+ (local_ip, local_port) = conn.local_address
+ else:
+ (local_ip, local_port) = conn.laddr
+ if self.port != local_port:
+ continue
+ if hasattr(conn, 'remote_address'):
+ (remote_ip, remote_port) = conn.remote_address
+ else:
+ (remote_ip, remote_port) = conn.raddr
+ if (conn.family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (conn.family, local_ip) in self.ips,
+ (conn.family, self.match_all_ips[conn.family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+ return active_connections
+
+
+# ===========================================
+# Subclass: Linux
+
+class LinuxTCPConnectionInfo(TCPConnectionInfo):
+ """
+ This is a TCP Connection Info evaluation strategy class
+ that utilizes information from Linux's procfs. While less universal,
+ does allow Linux targets to not require an additional library.
+ """
+ platform = 'Linux'
+ distribution = None
+
+ source_file = {
+ socket.AF_INET: '/proc/net/tcp',
+ socket.AF_INET6: '/proc/net/tcp6'
+ }
+ match_all_ips = {
+ socket.AF_INET: '00000000',
+ socket.AF_INET6: '00000000000000000000000000000000',
+ }
+ ipv4_mapped_ipv6_address = {
+ 'prefix': '0000000000000000FFFF0000',
+ 'match_all': '0000000000000000FFFF000000000000'
+ }
+ local_address_field = 1
+ remote_address_field = 2
+ connection_state_field = 3
+
+ def __init__(self, module):
+ self.module = module
+ self.ips = _convert_host_to_hex(module.params['host'])
+ self.port = "%0.4X" % int(module.params['port'])
+ self.exclude_ips = self._get_exclude_ips()
+
+ def _get_exclude_ips(self):
+ exclude_hosts = self.module.params['exclude_hosts']
+ exclude_ips = []
+ if exclude_hosts is not None:
+ for host in exclude_hosts:
+ exclude_ips.extend(_convert_host_to_hex(host))
+ return exclude_ips
+
+ def get_active_connections_count(self):
+ active_connections = 0
+ for family in self.source_file.keys():
+ if not os.path.isfile(self.source_file[family]):
+ continue
+            try:
+                # the proc file may disappear or become unreadable between the
+                # isfile() check and the read; treat that as no connections
+                with open(self.source_file[family]) as f:
+                    for tcp_connection in f.readlines():
+                        tcp_connection = tcp_connection.strip().split()
+                        if tcp_connection[self.local_address_field] == 'local_address':
+                            continue
+                        if (tcp_connection[self.connection_state_field] not in
+                                [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
+                            continue
+                        (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
+                        if self.port != local_port:
+                            continue
+                        (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
+                        if (family, remote_ip) in self.exclude_ips:
+                            continue
+                        if any((
+                            (family, local_ip) in self.ips,
+                            (family, self.match_all_ips[family]) in self.ips,
+                            local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+                            (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+                        )):
+                            active_connections += 1
+            except IOError:
+                pass
+
+ return active_connections
+
+
+def _convert_host_to_ip(host):
+ """
+    Perform forward DNS resolution on host; passing an IP address returns that same IP
+
+ Args:
+ host: String with either hostname, IPv4, or IPv6 address
+
+ Returns:
+ List of tuples containing address family and IP
+ """
+ addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
+ ips = []
+ for family, socktype, proto, canonname, sockaddr in addrinfo:
+ ip = sockaddr[0]
+ ips.append((family, ip))
+ if family == socket.AF_INET:
+ ips.append((socket.AF_INET6, "::ffff:" + ip))
+ return ips
+
+
+def _convert_host_to_hex(host):
+ """
+ Convert the provided host to the format in /proc/net/tcp*
+
+    /proc/net/tcp stores IPv4 addresses as little-endian four-byte hex;
+    /proc/net/tcp6 stores IPv6 addresses little-endian within each four-byte word
+
+ Args:
+ host: String with either hostname, IPv4, or IPv6 address
+
+ Returns:
+ List of tuples containing address family and the
+ little-endian converted host
+ """
+ ips = []
+ if host is not None:
+ for family, ip in _convert_host_to_ip(host):
+ hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))
+ hexip_hf = ""
+ for i in range(0, len(hexip_nf), 8):
+ ipgroup_nf = hexip_nf[i:i + 8]
+ ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))
+ hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf)
+ ips.append((family, hexip_hf))
+ return ips
+
+
+def _timedelta_total_seconds(timedelta):
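+    # equivalent to timedelta.total_seconds(), presumably open-coded here
+    # because that method only exists on Python 2.7+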
+ return (
+ timedelta.microseconds + 0.0 +
+ (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
+
+
+def get_connection_state_id(state):
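+    # map a TCP state name to the hex code used in the 'st' column of
+    # /proc/net/tcp*; the values mirror the kernel's TCP state enumeration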
+ connection_state_id = {
+ 'ESTABLISHED': '01',
+ 'SYN_SENT': '02',
+ 'SYN_RECV': '03',
+ 'FIN_WAIT1': '04',
+ 'FIN_WAIT2': '05',
+ 'TIME_WAIT': '06',
+ }
+ return connection_state_id[state]
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ timeout=dict(type='int', default=300),
+ connect_timeout=dict(type='int', default=5),
+ delay=dict(type='int', default=0),
+ port=dict(type='int'),
+ active_connection_states=dict(type='list', elements='str', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
+ path=dict(type='path'),
+ search_regex=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),
+ exclude_hosts=dict(type='list', elements='str'),
+ sleep=dict(type='int', default=1),
+ msg=dict(type='str'),
+ ),
+ )
+
+ host = module.params['host']
+ timeout = module.params['timeout']
+ connect_timeout = module.params['connect_timeout']
+ delay = module.params['delay']
+ port = module.params['port']
+ state = module.params['state']
+
+ path = module.params['path']
+ b_path = to_bytes(path, errors='surrogate_or_strict', nonstring='passthru')
+
+ search_regex = module.params['search_regex']
+ b_search_regex = to_bytes(search_regex, errors='surrogate_or_strict', nonstring='passthru')
+
+ msg = module.params['msg']
+
+ if search_regex is not None:
+ try:
+ b_compiled_search_re = re.compile(b_search_regex, re.MULTILINE)
+ except re.error as e:
+ module.fail_json(msg="Invalid regular expression: %s" % e)
+ else:
+ b_compiled_search_re = None
+
+ match_groupdict = {}
+ match_groups = ()
+
+ if port and path:
+ module.fail_json(msg="port and path parameter can not both be passed to wait_for", elapsed=0)
+ if path and state == 'stopped':
+ module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module", elapsed=0)
+ if path and state == 'drained':
+ module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module", elapsed=0)
+ if module.params['exclude_hosts'] is not None and state != 'drained':
+ module.fail_json(msg="exclude_hosts should only be with state=drained", elapsed=0)
+ for _connection_state in module.params['active_connection_states']:
+ try:
+ get_connection_state_id(_connection_state)
+ except Exception:
+ module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0)
+
+ start = datetime.datetime.utcnow()
+
+ if delay:
+ time.sleep(delay)
+
+ if not port and not path and state != 'drained':
+ time.sleep(timeout)
+ elif state in ['absent', 'stopped']:
+ # first wait for the stop condition
+ end = start + datetime.timedelta(seconds=timeout)
+
+ while datetime.datetime.utcnow() < end:
+ if path:
+ try:
+ if not os.access(b_path, os.F_OK):
+ break
+ except IOError:
+ break
+ elif port:
+ try:
+ s = socket.create_connection((host, port), connect_timeout)
+ s.shutdown(socket.SHUT_RDWR)
+ s.close()
+ except Exception:
+ break
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+ else:
+ elapsed = datetime.datetime.utcnow() - start
+ if port:
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ module.fail_json(msg=msg or "Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
+
+ elif state in ['started', 'present']:
+ # wait for start condition
+ end = start + datetime.timedelta(seconds=timeout)
+ while datetime.datetime.utcnow() < end:
+ if path:
+ try:
+ os.stat(b_path)
+ except OSError as e:
+ # If anything except file not present, throw an error
+                    if e.errno != errno.ENOENT:
+ elapsed = datetime.datetime.utcnow() - start
+ module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
+ # file doesn't exist yet, so continue
+ else:
+ # File exists. Are there additional things to check?
+ if not b_compiled_search_re:
+ # nope, succeed!
+ break
+ try:
+ with open(b_path, 'rb') as f:
+ with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mm:
+ search = b_compiled_search_re.search(mm)
+ if search:
+ if search.groupdict():
+ match_groupdict = search.groupdict()
+ if search.groups():
+ match_groups = search.groups()
+
+ break
+ except IOError:
+ pass
+ elif port:
+ alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
+ try:
+ s = socket.create_connection((host, port), min(connect_timeout, alt_connect_timeout))
+ except Exception:
+ # Failed to connect by connect_timeout. wait and try again
+ pass
+ else:
+ # Connected -- are there additional conditions?
+ if b_compiled_search_re:
+ b_data = b''
+ matched = False
+ while datetime.datetime.utcnow() < end:
+ max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
+ readable = select.select([s], [], [], max_timeout)[0]
+ if not readable:
+ # No new data. Probably means our timeout
+ # expired
+ continue
+ response = s.recv(1024)
+ if not response:
+ # Server shutdown
+ break
+ b_data += response
+ if b_compiled_search_re.search(b_data):
+ matched = True
+ break
+
+ # Shutdown the client socket
+ try:
+ s.shutdown(socket.SHUT_RDWR)
+ except socket.error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # else, the server broke the connection on its end, assume it's not ready
+ else:
+ s.close()
+ if matched:
+ # Found our string, success!
+ break
+ else:
+ # Connection established, success!
+ try:
+ s.shutdown(socket.SHUT_RDWR)
+ except socket.error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # else, the server broke the connection on its end, assume it's not ready
+ else:
+ s.close()
+ break
+
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+
+ else: # while-else
+ # Timeout expired
+ elapsed = datetime.datetime.utcnow() - start
+ if port:
+ if search_regex:
+ module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
+ else:
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ if search_regex:
+ module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
+ else:
+ module.fail_json(msg=msg or "Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
+
+ elif state == 'drained':
+ # wait until all active connections are gone
+ end = start + datetime.timedelta(seconds=timeout)
+ tcpconns = TCPConnectionInfo(module)
+ while datetime.datetime.utcnow() < end:
+ if tcpconns.get_active_connections_count() == 0:
+ break
+
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+ else:
+ elapsed = datetime.datetime.utcnow() - start
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path,
+ elapsed=elapsed.seconds)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/wait_for_connection.py b/lib/ansible/modules/wait_for_connection.py
new file mode 100644
index 0000000..f0eccb6
--- /dev/null
+++ b/lib/ansible/modules/wait_for_connection.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wait_for_connection
+short_description: Waits until remote system is reachable/usable
+description:
+- Waits for a total of C(timeout) seconds.
+- Retries the transport connection after a timeout of C(connect_timeout).
+- Tests the transport connection every C(sleep) seconds.
+- This module makes use of internal ansible transport (and configuration) and the ping/win_ping module to guarantee correct end-to-end functioning.
+- This module is also supported for Windows targets.
+version_added: '2.3'
+options:
+ connect_timeout:
+ description:
+ - Maximum number of seconds to wait for a connection to happen before closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - Number of seconds to wait before starting to poll.
+ type: int
+ default: 0
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ type: int
+ default: 1
+ timeout:
+ description:
+ - Maximum number of seconds to wait for.
+ type: int
+ default: 600
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ details: As long as there is a connection plugin
+ platforms: all
+seealso:
+- module: ansible.builtin.wait_for
+- module: ansible.windows.win_wait_for
+- module: community.windows.win_wait_for_process
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Wait 600 seconds for target connection to become reachable/usable
+ ansible.builtin.wait_for_connection:
+
+- name: Wait 300 seconds, but only start checking after 60 seconds
+ ansible.builtin.wait_for_connection:
+ delay: 60
+ timeout: 300
+
+# Wake desktops, wait for them to become ready and continue playbook
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: Send magic Wake-On-Lan packet to turn on individual systems
+ community.general.wakeonlan:
+ mac: '{{ mac }}'
+ broadcast: 192.168.0.255
+ delegate_to: localhost
+
+ - name: Wait for system to become reachable
+ ansible.builtin.wait_for_connection:
+
+ - name: Gather facts for first time
+ ansible.builtin.setup:
+
+# Build a new VM, wait for it to become ready and continue playbook
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: Clone new VM, if missing
+ community.vmware.vmware_guest:
+ hostname: '{{ vcenter_ipaddress }}'
+ name: '{{ inventory_hostname_short }}'
+ template: Windows 2012R2
+ customization:
+ hostname: '{{ vm_shortname }}'
+ runonce:
+ - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
+ delegate_to: localhost
+
+ - name: Wait for system to become reachable over WinRM
+ ansible.builtin.wait_for_connection:
+ timeout: 900
+
+ - name: Gather facts for first time
+ ansible.builtin.setup:
+'''
+
+RETURN = r'''
+elapsed:
+ description: The number of seconds that elapsed waiting for the connection to appear.
+ returned: always
+ type: float
+ sample: 23.1
+'''
diff --git a/lib/ansible/modules/yum.py b/lib/ansible/modules/yum.py
new file mode 100644
index 0000000..040ee27
--- /dev/null
+++ b/lib/ansible/modules/yum.py
@@ -0,0 +1,1818 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+# Copyright: (c) 2014, Epic Games, Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yum
+version_added: historical
+short_description: Manages packages with the I(yum) package manager
+description:
+    - Installs, upgrades, downgrades, removes, and lists packages and groups with the I(yum) package manager.
+ - This module only works on Python 2. If you require Python 3 support see the M(ansible.builtin.dnf) module.
+options:
+ use_backend:
+ description:
+      - This module supports C(yum) (as it always has); this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
+        upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
+        "new yum", and it has a C(dnf) backend.
+ - By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
+ default: "auto"
+ choices: [ auto, yum, yum4, dnf ]
+ type: str
+ version_added: "2.7"
+ name:
+ description:
+ - A package name or package specifier with version, like C(name-1.0).
+ - Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
+ - If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
+ See the C(allow_downgrade) documentation for caveats with downgrading packages.
+ - When using state=latest, this can be C('*') which means run C(yum -y update).
+ - You can also pass a url or a local path to a rpm file (using state=present).
+ To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
+ aliases: [ pkg ]
+ type: list
+ elements: str
+ exclude:
+ description:
+      - Package name(s) to exclude when state=present or state=latest.
+ type: list
+ elements: str
+ version_added: "2.0"
+ list:
+ description:
+ - "Package name to run the equivalent of C(yum list --show-duplicates <package>) against. In addition to listing packages,
+ use can also list the following: C(installed), C(updates), C(available) and C(repos)."
+ - This parameter is mutually exclusive with I(name).
+ type: str
+ state:
+ description:
+ - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
+ - C(present) and C(installed) will simply ensure that a desired package is installed.
+ - C(latest) will update the specified package if it's not of the latest available version.
+ - C(absent) and C(removed) will remove the specified package.
+ - Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
+ enabled for this module, then C(absent) is inferred.
+ type: str
+ choices: [ absent, installed, latest, present, removed ]
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a C(",").
+ - As of Ansible 2.7, this can alternatively be a list instead of C(",")
+ separated string
+ type: list
+ elements: str
+ version_added: "0.9"
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a C(",").
+ - As of Ansible 2.7, this can alternatively be a list instead of C(",")
+ separated string
+ type: list
+ elements: str
+ version_added: "0.9"
+ conf_file:
+ description:
+ - The remote yum configuration file to use for the transaction.
+ type: str
+ version_added: "0.6"
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ version_added: "1.2"
+ skip_broken:
+ description:
+ - Skip all unavailable packages or packages with broken dependencies
+ without raising an error. Equivalent to passing the --skip-broken option.
+ type: bool
+ default: "no"
+ version_added: "2.3"
+ update_cache:
+ description:
+ - Force yum to check if cache is out of date and redownload if needed.
+ Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ aliases: [ expire-cache ]
+ version_added: "1.9"
+ validate_certs:
+ description:
+      - This only applies if using an https url as the source of the rpm, for example for localinstall. If set to C(false), the SSL certificates will not be validated.
+      - This should only be set to C(false) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
+ - Prior to 2.1 the code worked as if this was set to C(true).
+ type: bool
+ default: "yes"
+ version_added: "2.1"
+ sslverify:
+ description:
+      - Whether to validate the SSL certificate of the repository server for this transaction.
+ - This should be set to C(false) if one of the configured repositories is using an untrusted or self-signed certificate.
+ type: bool
+ default: "yes"
+ version_added: "2.13"
+ update_only:
+ description:
+ - When using latest, only update installed packages. Do not install packages.
+ - Has an effect only if state is I(latest)
+ default: "no"
+ type: bool
+ version_added: "2.5"
+
+ installroot:
+ description:
+ - Specifies an alternative installroot, relative to which all packages
+ will be installed.
+ default: "/"
+ type: str
+ version_added: "2.3"
+ security:
+ description:
+ - If set to C(true), and C(state=latest) then only installs updates that have been marked security related.
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ bugfix:
+ description:
+ - If set to C(true), and C(state=latest) then only installs updates that have been marked bugfix related.
+ default: "no"
+ type: bool
+ version_added: "2.6"
+ allow_downgrade:
+ description:
+      - Specify if the named package and version is allowed to downgrade
+        an already installed higher version of that package.
+ Note that setting allow_downgrade=True can make this module
+ behave in a non-idempotent way. The task could end up with a set
+ of packages that does not match the complete list of specified
+ packages to install (because dependencies between the downgraded
+ package and others can cause changes to the packages which were
+ in the earlier transaction).
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ enable_plugin:
+ description:
+ - I(Plugin) name to enable for the install/update operation.
+ The enabled plugin will not persist beyond the transaction.
+ type: list
+ elements: str
+ version_added: "2.5"
+ disable_plugin:
+ description:
+ - I(Plugin) name to disable for the install/update operation.
+ The disabled plugins will not persist beyond the transaction.
+ type: list
+ elements: str
+ version_added: "2.5"
+ releasever:
+ description:
+ - Specifies an alternative release from which all packages will be
+ installed.
+ type: str
+ version_added: "2.7"
+ autoremove:
+ description:
+ - If C(true), removes all "leaf" packages from the system that were originally
+ installed as dependencies of user-installed packages but which are no longer
+        required by any such package. Should be used alone or when state is I(absent).
+ - "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ disable_excludes:
+ description:
+ - Disable the excludes defined in YUM config files.
+ - If set to C(all), disables all excludes.
+ - If set to C(main), disable excludes defined in [main] in yum.conf.
+ - If set to C(repoid), disable excludes defined for given repo id.
+ type: str
+ version_added: "2.7"
+ download_only:
+ description:
+ - Only download the packages, do not install them.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ lock_timeout:
+ description:
+ - Amount of time to wait for the yum lockfile to be freed.
+ required: false
+ default: 30
+ type: int
+ version_added: "2.8"
+ install_weak_deps:
+ description:
+ - Will also install all packages linked by a weak dependency relation.
+ - "NOTE: This feature requires yum >= 4 (RHEL/CentOS 8+)"
+ type: bool
+ default: "yes"
+ version_added: "2.8"
+ download_dir:
+ description:
+ - Specifies an alternate directory to store packages.
+ - Has an effect only if I(download_only) is specified.
+ type: str
+ version_added: "2.8"
+ install_repoquery:
+ description:
+ - If repoquery is not available, install yum-utils. If the system is
+ registered to RHN or an RHN Satellite, repoquery allows for querying
+ all channels assigned to the system. It is also required to use the
+ 'list' parameter.
+ - "NOTE: This will run and be logged as a separate yum transation which
+ takes place before any other installation or removal."
+ - "NOTE: This will use the system's default enabled repositories without
+ regard for disablerepo/enablerepo given to the module."
+ required: false
+ version_added: "1.5"
+ default: "yes"
+ type: bool
+ cacheonly:
+ description:
+ - Tells yum to run entirely from system cache; does not download or update metadata.
+ default: "no"
+ type: bool
+ version_added: "2.12"
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+attributes:
+ action:
+ details: In the case of yum, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
+ support: partial
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: rhel
+notes:
+  - When used with a C(loop:) each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+ - In versions prior to 1.9.2 this module installed and removed each package
+ given to the yum module separately. This caused problems when packages
+ specified by filename or url had to be installed or removed together. In
+ 1.9.2 this was fixed so that packages are installed in one yum
+ transaction. However, if one of the packages adds a new yum repository
+ that the other packages come from (such as epel-release) then that package
+ needs to be installed in a separate task. This mimics yum's command line
+ behaviour.
+ - 'Yum itself has two types of groups. "Package groups" are specified in the
+ rpm itself while "environment groups" are specified in a separate file
+ (usually by the distribution). Unfortunately, this division becomes
+ apparent to ansible users because ansible needs to operate on the group
+ of packages in a single transaction and yum requires groups to be specified
+ in different ways when used in that way. Package groups are specified as
+ "@development-tools" and environment groups are "@^gnome-desktop-environment".
+ Use the "yum group list hidden ids" command to see which category of group the group
+ you want to install falls into.'
+ - 'The yum module does not support clearing yum cache in an idempotent way, so it
+    was decided not to implement it; the only method is to use command and call the yum
+ command directly, namely "command: yum clean all"
+ https://github.com/ansible/ansible/pull/31450#issuecomment-352889579'
+# informational: requirements for nodes
+requirements:
+- yum
+author:
+ - Ansible Core Team
+ - Seth Vidal (@skvidal)
+ - Eduard Snesarev (@verm666)
+ - Berend De Schouwer (@berenddeschouwer)
+ - Abhijeet Kasurde (@Akasurde)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Install the latest version of Apache
+ ansible.builtin.yum:
+ name: httpd
+ state: latest
+
+- name: Install Apache >= 2.4
+ ansible.builtin.yum:
+ name: httpd>=2.4
+ state: present
+
+- name: Install a list of packages (suitable replacement for 2.11 loop deprecation warning)
+ ansible.builtin.yum:
+ name:
+ - nginx
+ - postgresql
+ - postgresql-server
+ state: present
+
+- name: Install a list of packages with a list variable
+ ansible.builtin.yum:
+ name: "{{ packages }}"
+ vars:
+ packages:
+ - httpd
+ - httpd-tools
+
+- name: Remove the Apache package
+ ansible.builtin.yum:
+ name: httpd
+ state: absent
+
+- name: Install the latest version of Apache from the testing repo
+ ansible.builtin.yum:
+ name: httpd
+ enablerepo: testing
+ state: present
+
+- name: Install one specific version of Apache
+ ansible.builtin.yum:
+ name: httpd-2.2.29-1.4.amzn1
+ state: present
+
+- name: Upgrade all packages
+ ansible.builtin.yum:
+ name: '*'
+ state: latest
+
+- name: Upgrade all packages, excluding kernel & foo related packages
+ ansible.builtin.yum:
+ name: '*'
+ state: latest
+ exclude: kernel*,foo*
+
+- name: Install the nginx rpm from a remote repo
+ ansible.builtin.yum:
+ name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install nginx rpm from a local file
+ ansible.builtin.yum:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install the 'Development tools' package group
+ ansible.builtin.yum:
+ name: "@Development tools"
+ state: present
+
+- name: Install the 'Gnome desktop' environment group
+ ansible.builtin.yum:
+ name: "@^gnome-desktop-environment"
+ state: present
+
+- name: List ansible packages and register result to print with debug later
+ ansible.builtin.yum:
+ list: ansible
+ register: result
+
+- name: Install package with multiple repos enabled
+ ansible.builtin.yum:
+ name: sos
+ enablerepo: "epel,ol7_latest"
+
+- name: Install package with multiple repos disabled
+ ansible.builtin.yum:
+ name: sos
+ disablerepo: "epel,ol7_latest"
+
+- name: Download the nginx package but do not install it
+ ansible.builtin.yum:
+ name:
+ - nginx
+ state: latest
+ download_only: true
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.respawn import has_respawned, respawn_module
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
+import errno
+import os
+import re
+import sys
+import tempfile
+
+try:
+ import rpm
+ HAS_RPM_PYTHON = True
+except ImportError:
+ HAS_RPM_PYTHON = False
+
+try:
+ import yum
+ HAS_YUM_PYTHON = True
+except ImportError:
+ HAS_YUM_PYTHON = False
+
+try:
+ from yum.misc import find_unfinished_transactions, find_ts_remaining
+ from rpmUtils.miscutils import splitFilename, compareEVR
+ transaction_helpers = True
+except ImportError:
+ transaction_helpers = False
+
+from contextlib import contextmanager
+from ansible.module_utils.urls import fetch_file
+
+def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
+rpmbin = None
+
+
+class YumModule(YumDnf):
+ """
+ Yum Ansible module back-end implementation
+ """
+
+ def __init__(self, module):
+
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ # This populates instance vars for all argument spec params
+ super(YumModule, self).__init__(module)
+
+ self.pkg_mgr_name = "yum"
+ self.lockfile = '/var/run/yum.pid'
+ self._yum_base = None
+
+ def _enablerepos_with_error_checking(self):
+ # NOTE: This seems unintuitive, but it mirrors yum's CLI behavior
+ if len(self.enablerepo) == 1:
+ try:
+ self.yum_base.repos.enableRepo(self.enablerepo[0])
+ except yum.Errors.YumBaseError as e:
+ if u'repository not found' in to_text(e):
+ self.module.fail_json(msg="Repository %s not found." % self.enablerepo[0])
+ else:
+ raise e
+ else:
+ for rid in self.enablerepo:
+ try:
+ self.yum_base.repos.enableRepo(rid)
+ except yum.Errors.YumBaseError as e:
+ if u'repository not found' in to_text(e):
+ self.module.warn("Repository %s not found." % rid)
+ else:
+ raise e
+
+ def is_lockfile_pid_valid(self):
+ try:
+ try:
+ with open(self.lockfile, 'r') as f:
+ oldpid = int(f.readline())
+ except ValueError:
+ # invalid data
+ os.unlink(self.lockfile)
+ return False
+
+ if oldpid == os.getpid():
+ # that's us?
+ os.unlink(self.lockfile)
+ return False
+
+ try:
+ with open("/proc/%d/stat" % oldpid, 'r') as f:
+ stat = f.readline()
+
+ if stat.split()[2] == 'Z':
+ # Zombie
+ os.unlink(self.lockfile)
+ return False
+ except IOError:
+ # either /proc is not mounted or the process is already dead
+ try:
+ # check the state of the process
+ os.kill(oldpid, 0)
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ # No such process
+ os.unlink(self.lockfile)
+ return False
+
+ self.module.fail_json(msg="Unable to check PID %s in %s: %s" % (oldpid, self.lockfile, to_native(e)))
+ except (IOError, OSError) as e:
+ # lockfile disappeared?
+ return False
+
+ # another copy seems to be running
+ return True
+
+ @property
+ def yum_base(self):
+ if self._yum_base:
+ return self._yum_base
+ else:
+ # Only init once
+ self._yum_base = yum.YumBase()
+ self._yum_base.preconf.debuglevel = 0
+ self._yum_base.preconf.errorlevel = 0
+ self._yum_base.preconf.plugins = True
+ self._yum_base.preconf.enabled_plugins = self.enable_plugin
+ self._yum_base.preconf.disabled_plugins = self.disable_plugin
+ if self.releasever:
+ self._yum_base.preconf.releasever = self.releasever
+ if self.installroot != '/':
+ # do not setup installroot by default, because of error
+ # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
+ # in old yum version (like in CentOS 6.6)
+ self._yum_base.preconf.root = self.installroot
+ self._yum_base.conf.installroot = self.installroot
+ if self.conf_file and os.path.exists(self.conf_file):
+ self._yum_base.preconf.fn = self.conf_file
+ if os.geteuid() != 0:
+ if hasattr(self._yum_base, 'setCacheDir'):
+ self._yum_base.setCacheDir()
+ else:
+ cachedir = yum.misc.getCacheDir()
+ self._yum_base.repos.setCacheDir(cachedir)
+ self._yum_base.conf.cache = 0
+ if self.disable_excludes:
+ self._yum_base.conf.disable_excludes = self.disable_excludes
+
+ # setting conf.sslverify allows retrieving the repo's metadata
+ # without validating the certificate, but that does not allow
+ # package installation from a bad-ssl repo.
+ self._yum_base.conf.sslverify = self.sslverify
+
+            # A side effect of accessing conf is that the configuration is
+ # loaded and plugins are discovered
+ self.yum_base.conf
+
+ try:
+ for rid in self.disablerepo:
+ self.yum_base.repos.disableRepo(rid)
+
+ self._enablerepos_with_error_checking()
+
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return self._yum_base
+
+ def po_to_envra(self, po):
+ if hasattr(po, 'ui_envra'):
+ return po.ui_envra
+
+ return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
+
+ def is_group_env_installed(self, name):
+ name_lower = name.lower()
+
+ if yum.__version_info__ >= (3, 4):
+ groups_list = self.yum_base.doGroupLists(return_evgrps=True)
+ else:
+ groups_list = self.yum_base.doGroupLists()
+
+ # list of the installed groups on the first index
+ groups = groups_list[0]
+ for group in groups:
+ if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
+ return True
+
+ if yum.__version_info__ >= (3, 4):
+ # list of the installed env_groups on the third index
+ envs = groups_list[2]
+ for env in envs:
+ if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
+ return True
+
+ return False
+
+ def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
+ if qf is None:
+ qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"
+
+ if not repoq:
+ pkgs = []
+ try:
+ e, m, _ = self.yum_base.rpmdb.matchPackageNames([pkgspec])
+ pkgs = e + m
+ if not pkgs and not is_pkg:
+ pkgs.extend(self.yum_base.returnInstalledPackagesByDep(pkgspec))
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return [self.po_to_envra(p) for p in pkgs]
+
+ else:
+ global rpmbin
+ if not rpmbin:
+ rpmbin = self.module.get_bin_path('rpm', required=True)
+
+ cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
+ if '*' in pkgspec:
+ cmd.append('-a')
+ if self.installroot != '/':
+ cmd.extend(['--root', self.installroot])
+ # rpm localizes messages and we're screen scraping so make sure we use
+ # an appropriate locale
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ if rc != 0 and 'is not installed' not in out:
+ self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
+ if 'is not installed' in out:
+ out = ''
+
+ pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
+ if not pkgs and not is_pkg:
+ cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
+ if self.installroot != '/':
+ cmd.extend(['--root', self.installroot])
+ rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
+ else:
+ rc2, out2, err2 = (0, '', '')
+
+ if rc2 != 0 and 'no package provides' not in out2:
+ self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
+ if 'no package provides' in out2:
+ out2 = ''
+ pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
+ return pkgs
+
+ return []
+
+ def is_available(self, repoq, pkgspec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ try:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
+ pkgs = e + m
+ if not pkgs:
+ pkgs.extend(self.yum_base.returnPackagesByDep(pkgspec))
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return [self.po_to_envra(p) for p in pkgs]
+
+ else:
+ myrepoq = list(repoq)
+
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+ if self.releasever:
+                myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--qf", qf, pkgspec]
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ return [p for p in out.split('\n') if p.strip()]
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
+
+ return []
+
+ def is_update(self, repoq, pkgspec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ updates = []
+
+ try:
+ pkgs = self.yum_base.returnPackagesByDep(pkgspec) + \
+ self.yum_base.returnInstalledPackagesByDep(pkgspec)
+ if not pkgs:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
+ pkgs = e + m
+ updates = self.yum_base.doPackageLists(pkgnarrow='updates').updates
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ retpkgs = (pkg for pkg in pkgs if pkg in updates)
+
+ return set(self.po_to_envra(p) for p in retpkgs)
+
+ else:
+ myrepoq = list(repoq)
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+ if self.releasever:
+                myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ return set(p for p in out.split('\n') if p.strip())
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
+
+ return set()
+
+ def what_provides(self, repoq, req_spec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ try:
+ try:
+ pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
+ self.yum_base.returnInstalledPackagesByDep(req_spec)
+ except Exception as e:
+ # If a repo with `repo_gpgcheck=1` is added and the repo GPG
+ # key was never accepted, querying this repo will throw an
+ # error: 'repomd.xml signature could not be verified'. In that
+ # situation we need to run `yum -y makecache fast` which will accept
+ # the key and try again.
+ if 'repomd.xml signature could not be verified' in to_native(e):
+ if self.releasever:
+ self.module.run_command(self.yum_basecmd + ['makecache', 'fast', '--releasever=%s' % self.releasever])
+ else:
+ self.module.run_command(self.yum_basecmd + ['makecache', 'fast'])
+ pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
+ self.yum_base.returnInstalledPackagesByDep(req_spec)
+ else:
+ raise
+ if not pkgs:
+ exact_matches, glob_matches = self.yum_base.pkgSack.matchPackageNames([req_spec])[0:2]
+ pkgs.extend(exact_matches)
+ pkgs.extend(glob_matches)
+ exact_matches, glob_matches = self.yum_base.rpmdb.matchPackageNames([req_spec])[0:2]
+ pkgs.extend(exact_matches)
+ pkgs.extend(glob_matches)
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return set(self.po_to_envra(p) for p in pkgs)
+
+ else:
+ myrepoq = list(repoq)
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+ if self.releasever:
+ myrepoq.extend(['--releasever=%s' % self.releasever])
+
+ cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
+ rc, out, err = self.module.run_command(cmd)
+ cmd = myrepoq + ["--qf", qf, req_spec]
+ rc2, out2, err2 = self.module.run_command(cmd)
+ if rc == 0 and rc2 == 0:
+ out += out2
+ pkgs = {p for p in out.split('\n') if p.strip()}
+ if not pkgs:
+ pkgs = self.is_installed(repoq, req_spec, qf=qf)
+ return pkgs
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
+
+ return set()
+
+ def transaction_exists(self, pkglist):
+ """
+ checks the package list to see if any packages are
+ involved in an incomplete transaction
+ """
+
+ conflicts = []
+ if not transaction_helpers:
+ return conflicts
+
+ # first, we create a list of the package 'nvreas'
+ # so we can compare the pieces later more easily
+ pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
+
+ # next, we build the list of packages that are
+ # contained within an unfinished transaction
+ unfinished_transactions = find_unfinished_transactions()
+ for trans in unfinished_transactions:
+ steps = find_ts_remaining(trans)
+ for step in steps:
+ # the action is install/erase/etc., but we only
+ # care about the package spec contained in the step
+ (action, step_spec) = step
+ (n, v, r, e, a) = splitFilename(step_spec)
+ # and see if that spec is in the list of packages
+ # requested for installation/updating
+ for pkg in pkglist_nvreas:
+ # if the name and arch match, we're going to assume
+ # this package is part of a pending transaction
+ # the label is just for display purposes
+ label = "%s-%s" % (n, a)
+ if n == pkg[0] and a == pkg[4]:
+ if label not in conflicts:
+ conflicts.append("%s-%s" % (n, a))
+ break
+ return conflicts
+
+ def local_envra(self, path):
+ """return envra of a local rpm passed in"""
+
+ ts = rpm.TransactionSet()
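+ # skip signature verification so the header can be read even when the signing key is not imported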
+ ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
+ fd = os.open(path, os.O_RDONLY)
+ try:
+ header = ts.hdrFromFdno(fd)
+ except rpm.error:
+ return None
+ finally:
+ os.close(fd)
+
+ return '%s:%s-%s-%s.%s' % (
+ header[rpm.RPMTAG_EPOCH] or '0',
+ header[rpm.RPMTAG_NAME],
+ header[rpm.RPMTAG_VERSION],
+ header[rpm.RPMTAG_RELEASE],
+ header[rpm.RPMTAG_ARCH]
+ )
+
+ @contextmanager
+ def set_env_proxy(self):
+ # set the system proxy environment variables, saving any existing values
+ namepass = ""
+ scheme = ["http", "https"]
+ old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
+ try:
+ # "_none_" is a special value to disable proxy in yum.conf/*.repo
+ if self.yum_base.conf.proxy and self.yum_base.conf.proxy not in ("_none_",):
+ if self.yum_base.conf.proxy_username:
+ namepass = namepass + self.yum_base.conf.proxy_username
+ proxy_url = self.yum_base.conf.proxy
+ if self.yum_base.conf.proxy_password:
+ namepass = namepass + ":" + self.yum_base.conf.proxy_password
+ elif '@' in self.yum_base.conf.proxy:
+ namepass = self.yum_base.conf.proxy.split('@')[0].split('//')[-1]
+ proxy_url = self.yum_base.conf.proxy.replace("{0}@".format(namepass), "")
+
+ if namepass:
+ namepass = namepass + '@'
+ for item in scheme:
+ os.environ[item + "_proxy"] = re.sub(
+ r"(http://)",
+ r"\g<1>" + namepass, proxy_url
+ )
+ else:
+ for item in scheme:
+ os.environ[item + "_proxy"] = self.yum_base.conf.proxy
+ yield
+ except yum.Errors.YumBaseError:
+ raise
+ finally:
+ # revert to the previous system configuration
+ for item in scheme:
+ if os.getenv("{0}_proxy".format(item)):
+ del os.environ["{0}_proxy".format(item)]
+ if old_proxy_env[0]:
+ os.environ["http_proxy"] = old_proxy_env[0]
+ if old_proxy_env[1]:
+ os.environ["https_proxy"] = old_proxy_env[1]
+
+ def pkg_to_dict(self, pkgstr):
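+ # pkgstr is expected in the query format "name|epoch|version|release|arch|repo" (see list_stuff)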
+ if pkgstr.strip() and pkgstr.count('|') == 5:
+ n, e, v, r, a, repo = pkgstr.split('|')
+ else:
+ return {'error_parsing': pkgstr}
+
+ d = {
+ 'name': n,
+ 'arch': a,
+ 'epoch': e,
+ 'release': r,
+ 'version': v,
+ 'repo': repo,
+ 'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
+ }
+
+ if repo == 'installed':
+ d['yumstate'] = 'installed'
+ else:
+ d['yumstate'] = 'available'
+
+ return d
+
+ def repolist(self, repoq, qf="%{repoid}"):
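+ # list every repo id known to repoquery; backs the list=repos option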
+ cmd = repoq + ["--qf", qf, "-a"]
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+ rc, out, _ = self.module.run_command(cmd)
+ if rc == 0:
+ return set(p for p in out.split('\n') if p.strip())
+ else:
+ return set()
+
+ def list_stuff(self, repoquerybin, stuff):
+
+ qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
+ # is_installed goes through rpm instead of repoquery so it needs a slightly different format
+ is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
+ repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
+ if self.disablerepo:
+ repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
+ if self.enablerepo:
+ repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
+ if self.installroot != '/':
+ repoq.extend(['--installroot', self.installroot])
+ if self.conf_file and os.path.exists(self.conf_file):
+ repoq += ['-c', self.conf_file]
+
+ if stuff == 'installed':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
+
+ if stuff == 'updates':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
+
+ if stuff == 'available':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
+
+ if stuff == 'repos':
+ return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
+
+ return [
+ self.pkg_to_dict(p) for p in
+ sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
+ if p.strip()
+ ]
+
+ def exec_install(self, items, action, pkgs, res):
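+ # run a single yum transaction (install or downgrade) for the collected package list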
+ cmd = self.yum_basecmd + [action] + pkgs
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+
+ # setting sslverify using --setopt is required as conf.sslverify only
+ # affects the metadata retrieval.
+ if not self.sslverify:
+ cmd.extend(['--setopt', 'sslverify=0'])
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
+ else:
+ res['changes'] = dict(installed=pkgs)
+
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+
+ if rc == 1:
+ for spec in items:
+ # Fail on invalid urls:
+ if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
+ err = 'Package at %s could not be installed' % spec
+ self.module.fail_json(changed=False, msg=err, rc=rc)
+
+ res['rc'] = rc
+ res['results'].append(out)
+ res['msg'] += err
+ res['changed'] = True
+
+ if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
+ res['changed'] = False
+
+ if rc != 0:
+ res['changed'] = False
+ self.module.fail_json(**res)
+
+ # Fail if yum prints 'No space left on device' because that means some
+ # packages failed executing their post install scripts because of lack of
+ # free space (e.g. kernel package couldn't generate initramfs). Note that
+ # yum can still exit with rc=0 even if some post scripts didn't execute
+ # correctly.
+ if 'No space left on device' in out or 'No space left on device' in err:
+ res['changed'] = False
+ res['msg'] = 'No space left on device'
+ self.module.fail_json(**res)
+
+ # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
+ # look for each pkg in rpmdb
+ # look for each pkg via obsoletes
+
+ return res
+
+ def install(self, items, repoq):
+
+ pkgs = []
+ downgrade_pkgs = []
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['rc'] = 0
+ res['changed'] = False
+
+ for spec in items:
+ pkg = None
+ downgrade_candidate = False
+
+ # check if pkgspec is installed (if possible for idempotence)
+ if spec.endswith('.rpm') or '://' in spec:
+ if '://' not in spec and not os.path.exists(spec):
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails when used in a with_* loop
+ self.module.fail_json(**res)
+
+ if '://' in spec:
+ with self.set_env_proxy():
+ package = fetch_file(self.module, spec)
+ if not package.endswith('.rpm'):
+ # yum requires a local file to have a .rpm extension and we
+ # cannot guarantee that from a URL (redirects, proxies, etc.)
+ new_package_path = '%s.rpm' % package
+ os.rename(package, new_package_path)
+ package = new_package_path
+ else:
+ package = spec
+
+ # most common case is the pkg is already installed
+ envra = self.local_envra(package)
+ if envra is None:
+ self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
+ installed_pkgs = self.is_installed(repoq, envra)
+ if installed_pkgs:
+ res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
+ continue
+
+ (name, ver, rel, epoch, arch) = splitFilename(envra)
+ installed_pkgs = self.is_installed(repoq, name)
+
+ # handle two packages with the same EVR but different archs, e.g. x86_64 and i686
+ if len(installed_pkgs) == 2:
+ (cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
+ (cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
+ cur_epoch0 = cur_epoch0 or '0'
+ cur_epoch1 = cur_epoch1 or '0'
+ compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
+ if compare == 0 and cur_arch0 != cur_arch1:
+ for installed_pkg in installed_pkgs:
+ if installed_pkg.endswith(arch):
+ installed_pkgs = [installed_pkg]
+
+ if len(installed_pkgs) == 1:
+ installed_pkg = installed_pkgs[0]
+ (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
+ cur_epoch = cur_epoch or '0'
+ compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
+
+ # compare > 0 -> higher version is installed
+ # compare == 0 -> exact version is installed
+ # compare < 0 -> lower version is installed
+ if compare > 0 and self.allow_downgrade:
+ downgrade_candidate = True
+ elif compare >= 0:
+ continue
+
+ # else: if there are multiple installed packages with the same name, that would mean
+ # kernel, gpg-pubkey or the like, so just let yum deal with it and try to install it
+
+ pkg = package
+
+ # groups
+ elif spec.startswith('@'):
+ if self.is_group_env_installed(spec):
+ continue
+
+ pkg = spec
+
+ # range requires or file-requires or pkgname :(
+ else:
+ # most common case is that the pkg is already installed and done
+ # short-circuit the extra work and search for it as a pkg in is_installed
+ # if you find it then we're done
+ if not set(['*', '?']).intersection(set(spec)):
+ installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
+ if installed_pkgs:
+ res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
+ continue
+
+ # look up what pkgs provide this
+ pkglist = self.what_provides(repoq, spec)
+ if not pkglist:
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails when used in a with_* loop
+ self.module.fail_json(**res)
+
+ # if any of the packages are involved in a transaction, fail now
+ # so that we don't hang on the yum operation later
+ conflicts = self.transaction_exists(pkglist)
+ if conflicts:
+ res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['rc'] = 125 # Ensure the task fails when used in a with_* loop
+ self.module.fail_json(**res)
+
+ # if any of them are installed
+ # then nothing to do
+
+ found = False
+ for this in pkglist:
+ if self.is_installed(repoq, this, is_pkg=True):
+ found = True
+ res['results'].append('%s providing %s is already installed' % (this, spec))
+ break
+
+ # if the version of the pkg you have installed is not in ANY repo, but there are
+ # other versions in the repos (both higher and lower) then the previous checks won't work.
+ # so we check one more time. This really only works for pkgname - not for file provides or virt provides
+ # but virt provides should be all caught in what_provides on its own.
+ # highly irritating
+ if not found:
+ if self.is_installed(repoq, spec):
+ found = True
+ res['results'].append('package providing %s is already installed' % (spec))
+
+ if found:
+ continue
+
+ # Downgrade - The yum install command will only install or upgrade to a spec version, it will
+ # not install an older version of an RPM even if specified by the install spec. So we need to
+ # determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
+ if self.allow_downgrade:
+ for package in pkglist:
+ # Get the NEVRA of the requested package using pkglist instead of spec because pkglist
+ # contains consistently-formatted package names returned by yum, rather than user input
+ # that is often not parsed correctly by splitFilename().
+ (name, ver, rel, epoch, arch) = splitFilename(package)
+
+ # Check if any version of the requested package is installed
+ inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
+ if inst_pkgs:
+ (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
+ compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
+ if compare > 0:
+ downgrade_candidate = True
+ else:
+ downgrade_candidate = False
+ break
+
+ # If the package needs to be installed/upgraded/downgraded, pass in the spec.
+ # We could get here if nothing provides it, but that's not
+ # the error we're catching here.
+ pkg = spec
+
+ if downgrade_candidate and self.allow_downgrade:
+ downgrade_pkgs.append(pkg)
+ else:
+ pkgs.append(pkg)
+
+ if downgrade_pkgs:
+ res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)
+
+ if pkgs:
+ res = self.exec_install(items, 'install', pkgs, res)
+
+ return res
+
+ def remove(self, items, repoq):
+
+ pkgs = []
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['changed'] = False
+ res['rc'] = 0
+
+ for pkg in items:
+ if pkg.startswith('@'):
+ installed = self.is_group_env_installed(pkg)
+ else:
+ installed = self.is_installed(repoq, pkg)
+
+ if installed:
+ pkgs.append(pkg)
+ else:
+ res['results'].append('%s is not installed' % pkg)
+
+ if pkgs:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
+ else:
+ res['changes'] = dict(removed=pkgs)
+
+ # run an actual yum transaction
+ if self.autoremove:
+ cmd = self.yum_basecmd + ["autoremove"] + pkgs
+ else:
+ cmd = self.yum_basecmd + ["remove"] + pkgs
+ rc, out, err = self.module.run_command(cmd)
+
+ res['rc'] = rc
+ res['results'].append(out)
+ res['msg'] = err
+
+ if rc != 0:
+ if self.autoremove and 'No such command' in out:
+ self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
+ else:
+ self.module.fail_json(**res)
+
+ # compile the results into one batch. If anything is changed
+ # then mark changed
+ # at the end - if we've ended up failing then fail out of the rest
+ # of the process
+
+ # at this point we check to see if the pkg is no longer present
+ self._yum_base = None # previous YumBase package index is now invalid
+ for pkg in pkgs:
+ if pkg.startswith('@'):
+ installed = self.is_group_env_installed(pkg)
+ else:
+ installed = self.is_installed(repoq, pkg, is_pkg=True)
+
+ if installed:
+ # Return a message so it's obvious to the user why yum failed
+ # and which package couldn't be removed. More details:
+ # https://github.com/ansible/ansible/issues/35672
+ res['msg'] = "Package '%s' couldn't be removed!" % pkg
+ self.module.fail_json(**res)
+
+ res['changed'] = True
+
+ return res
+
+ def run_check_update(self):
+ # run check-update to see if we have packages pending
+ if self.releasever:
+ rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'] + ['--releasever=%s' % self.releasever])
+ else:
+ rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'])
+ return rc, out, err
+
+ @staticmethod
+ def parse_check_update(check_update_output):
+ # preprocess string and filter out empty lines so the regex below works
+ out = '\n'.join((l for l in check_update_output.splitlines() if l))
+
+ # Remove incorrect new lines in longer columns in output from yum check-update
+ # yum line wrapping can move the repo to the next line:
+ # some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
+ # some-repo-label
+ out = re.sub(r'\n\W+(.*)', r' \1', out)
+
+ updates = {}
+ obsoletes = {}
+ for line in out.split('\n'):
+ line = line.split()
+ """
+ Ignore irrelevant lines:
+ - '*' in line matches lines like mirror lists: "* base: mirror.corbina.net"
+ - len(line) != 3 or 6 could be strings like:
+ "This system is not registered with an entitlement server..."
+ - len(line) = 6 is package obsoletes
+ - checking for '.' in line[0] (package name) likely ensures that it is of format:
+ "package_name.arch" (coreutils.x86_64)
+ """
+ if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
+ continue
+
+ pkg, version, repo = line[0], line[1], line[2]
+ name, dist = pkg.rsplit('.', 1)
+
+ if name not in updates:
+ updates[name] = []
+
+ updates[name].append({'version': version, 'dist': dist, 'repo': repo})
+
+ if len(line) == 6:
+ obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
+ obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
+
+ if obsolete_name not in obsoletes:
+ obsoletes[obsolete_name] = []
+
+ obsoletes[obsolete_name].append({'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo})
+
+ return updates, obsoletes
+
+ def latest(self, items, repoq):
+
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['changed'] = False
+ res['rc'] = 0
+ pkgs = {}
+ pkgs['update'] = []
+ pkgs['install'] = []
+ updates = {}
+ obsoletes = {}
+ update_all = False
+ cmd = self.yum_basecmd[:]
+
+ # determine if we're doing an update all
+ if '*' in items:
+ update_all = True
+
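+ # check-update exits 0 when nothing is pending, 100 when updates are available and 1 on error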
+ rc, out, err = self.run_check_update()
+
+ if rc == 0 and update_all:
+ res['results'].append('Nothing to do here, all packages are up to date')
+ return res
+ elif rc == 100:
+ updates, obsoletes = self.parse_check_update(out)
+ elif rc == 1:
+ res['msg'] = err
+ res['rc'] = rc
+ self.module.fail_json(**res)
+
+ if update_all:
+ cmd.append('update')
+ will_update = set(updates.keys())
+ will_update_from_other_package = dict()
+ else:
+ will_update = set()
+ will_update_from_other_package = dict()
+ for spec in items:
+ # some guesswork is involved with groups: update @<group> will install the group if missing
+ if spec.startswith('@'):
+ pkgs['update'].append(spec)
+ will_update.add(spec)
+ continue
+
+ # check if pkgspec is installed (if possible for idempotence)
+ # localpkg
+ if spec.endswith('.rpm') and '://' not in spec:
+ if not os.path.exists(spec):
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails when used in a with_* loop
+ self.module.fail_json(**res)
+
+ # get the pkg e:name-v-r.arch
+ envra = self.local_envra(spec)
+
+ if envra is None:
+ self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
+
+ # local rpm files can't be updated
+ if self.is_installed(repoq, envra):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ continue
+
+ # URL
+ if '://' in spec:
+ # download package so that we can check if it's already installed
+ with self.set_env_proxy():
+ package = fetch_file(self.module, spec)
+ envra = self.local_envra(package)
+
+ if envra is None:
+ self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
+
+ # local rpm files can't be updated
+ if self.is_installed(repoq, envra):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ continue
+
+ # dep/pkgname - find it
+ if self.is_installed(repoq, spec):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ pkglist = self.what_provides(repoq, spec)
+ # FIXME..? may not be desirable to throw an exception here if a single package is missing
+ if not pkglist:
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails when used in a with_* loop
+ self.module.fail_json(**res)
+
+ nothing_to_do = True
+ for pkg in pkglist:
+ if spec in pkgs['install'] and self.is_available(repoq, pkg):
+ nothing_to_do = False
+ break
+
+ # this contains the full NVR and spec could contain wildcards
+ # or virtual provides (like "python-*" or "smtp-daemon") while
+ # updates contains name only.
+ pkgname, _, _, _, _ = splitFilename(pkg)
+ if spec in pkgs['update'] and pkgname in updates:
+ nothing_to_do = False
+ will_update.add(spec)
+ # Massage the updates list
+ if spec != pkgname:
+ # For reporting what packages would be updated more
+ # succinctly
+ will_update_from_other_package[spec] = pkgname
+ break
+
+ if not self.is_installed(repoq, spec) and self.update_only:
+ res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ continue
+ if nothing_to_do:
+ res['results'].append("All packages providing %s are up to date" % spec)
+ continue
+
+ # if any of the packages are involved in a transaction, fail now
+ # so that we don't hang on the yum operation later
+ conflicts = self.transaction_exists(pkglist)
+ if conflicts:
+ res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
+ res['rc'] = 128 # Ensure the task fails when used in a with_* loop
+ self.module.fail_json(**res)
+
+ # check_mode output
+ to_update = []
+ for w in will_update:
+ if w.startswith('@'):
+ # yum groups
+ to_update.append((w, None))
+ elif w not in updates:
+ # There are (at least, probably more) 2 ways we can get here:
+ #
+ # * A virtual provides (our user specifies "webserver", but
+ # "httpd" is the key in 'updates').
+ #
+ # * A wildcard. emac* will get us here if there's a package
+ # called 'emacs' in the pending updates list. 'updates' will
+ # of course key on 'emacs' in that case.
+
+ other_pkg = will_update_from_other_package[w]
+
+ # We are guaranteed that: other_pkg in updates
+ # ...based on the logic above. But we only want to show one
+ # update in this case (given the wording of "at least") below.
+ # As an example, consider a package installed twice:
+ # foobar.x86_64, foobar.i686
+ # We want to avoid having both:
+ # ('foo*', 'because of (at least) foobar-1.x86_64 from repo')
+ # ('foo*', 'because of (at least) foobar-1.i686 from repo')
+ # We just pick the first one.
+ #
+ # TODO: This is something that might be nice to change, but it
+ # would be a module UI change. But without it, we're
+ # dropping potentially important information about what
+ # was updated. Instead of (given_spec, random_matching_package)
+ # it'd be nice if we appended (given_spec, [all_matching_packages])
+ #
+ # ... But then, we also drop information if multiple
+ # different (distinct) packages match the given spec and
+ # we should probably fix that too.
+ pkg = updates[other_pkg][0]
+ to_update.append(
+ (
+ w,
+ 'because of (at least) %s-%s.%s from %s' % (
+ other_pkg,
+ pkg['version'],
+ pkg['dist'],
+ pkg['repo']
+ )
+ )
+ )
+ else:
+ # Otherwise the spec is an exact match
+ for pkg in updates[w]:
+ to_update.append(
+ (
+ w,
+ '%s.%s from %s' % (
+ pkg['version'],
+ pkg['dist'],
+ pkg['repo']
+ )
+ )
+ )
+
+ if self.update_only:
+ res['changes'] = dict(installed=[], updated=to_update)
+ else:
+ res['changes'] = dict(installed=pkgs['install'], updated=to_update)
+
+ if obsoletes:
+ res['obsoletes'] = obsoletes
+
+ # return results before we actually execute stuff
+ if self.module.check_mode:
+ if will_update or pkgs['install']:
+ res['changed'] = True
+ return res
+
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+
+ # run commands
+ if update_all:
+ rc, out, err = self.module.run_command(cmd)
+ res['changed'] = True
+ elif self.update_only:
+ if pkgs['update']:
+ cmd += ['update'] + pkgs['update']
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ out_lower = out.strip().lower()
+ if not out_lower.endswith("no packages marked for update") and \
+ not out_lower.endswith("nothing to do"):
+ res['changed'] = True
+ else:
+ rc, out, err = [0, '', '']
+ elif pkgs['install'] or will_update:
+ cmd += ['install'] + pkgs['install'] + pkgs['update']
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ out_lower = out.strip().lower()
+ if not out_lower.endswith("no packages marked for update") and \
+ not out_lower.endswith("nothing to do"):
+ res['changed'] = True
+ else:
+ rc, out, err = [0, '', '']
+
+ res['rc'] = rc
+ res['msg'] += err
+ res['results'].append(out)
+
+ if rc:
+ res['failed'] = True
+
+ return res
+
+ def ensure(self, repoq):
+ pkgs = self.names
+
+ # autoremove was provided without `name`
+ if not self.names and self.autoremove:
+ pkgs = []
+ self.state = 'absent'
+
+ if self.conf_file and os.path.exists(self.conf_file):
+ self.yum_basecmd += ['-c', self.conf_file]
+
+ if repoq:
+ repoq += ['-c', self.conf_file]
+
+ if self.skip_broken:
+ self.yum_basecmd.extend(['--skip-broken'])
+
+ if self.disablerepo:
+ self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])
+
+ if self.enablerepo:
+ self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])
+
+ if self.enable_plugin:
+ self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])
+
+ if self.disable_plugin:
+ self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])
+
+ if self.exclude:
+ e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
+ self.yum_basecmd.extend(e_cmd)
+
+ if self.disable_excludes:
+ self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])
+
+ if self.cacheonly:
+ self.yum_basecmd.extend(['--cacheonly'])
+
+ if self.download_only:
+ self.yum_basecmd.extend(['--downloadonly'])
+
+ if self.download_dir:
+ self.yum_basecmd.extend(['--downloaddir=%s' % self.download_dir])
+
+ if self.releasever:
+ self.yum_basecmd.extend(['--releasever=%s' % self.releasever])
+
+ if self.installroot != '/':
+ # do not set up installroot by default, because of the error
+ # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
+ # in old yum versions (like in CentOS 6.6)
+ e_cmd = ['--installroot=%s' % self.installroot]
+ self.yum_basecmd.extend(e_cmd)
+
+ if self.state in ('installed', 'present', 'latest'):
+ """ The need of this entire if conditional has to be changed
+ this function is the ensure function that is called
+ in the main section.
+
+ This conditional tends to disable/enable repo for
+ install present latest action, same actually
+ can be done for remove and absent action
+
+ As solution I would advice to cal
+ try: self.yum_base.repos.disableRepo(disablerepo)
+ and
+ try: self.yum_base.repos.enableRepo(enablerepo)
+ right before any yum_cmd is actually called regardless
+ of yum action.
+
+ Please note that enable/disablerepo options are general
+ options, this means that we can call those with any action
+ option. https://linux.die.net/man/8/yum
+
+ This docstring will be removed together when issue: #21619
+ will be solved.
+
+ This has been triggered by: #19587
+ """
+
+ if self.update_cache:
+ self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
+
+ try:
+ current_repos = self.yum_base.repos.repos.keys()
+ if self.enablerepo:
+ try:
+ new_repos = self.yum_base.repos.repos.keys()
+ for i in new_repos:
+ if i not in current_repos:
+ rid = self.yum_base.repos.getRepo(i)
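+ # touching repoXML forces yum to fetch the repo metadata, so a misconfigured newly-enabled repo fails here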
+ a = rid.repoXML.repoid # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
+ current_repos = new_repos
+ except yum.Errors.YumBaseError as e:
+ self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
+ except yum.Errors.YumBaseError as e:
+ self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
+ if self.state == 'latest' or self.update_only:
+ if self.disable_gpg_check:
+ self.yum_basecmd.append('--nogpgcheck')
+ if self.security:
+ self.yum_basecmd.append('--security')
+ if self.bugfix:
+ self.yum_basecmd.append('--bugfix')
+ res = self.latest(pkgs, repoq)
+ elif self.state in ('installed', 'present'):
+ if self.disable_gpg_check:
+ self.yum_basecmd.append('--nogpgcheck')
+ res = self.install(pkgs, repoq)
+ elif self.state in ('removed', 'absent'):
+ res = self.remove(pkgs, repoq)
+ else:
+ # should be caught by AnsibleModule argument_spec
+ self.module.fail_json(
+ msg="we should never get here unless this all failed",
+ changed=False,
+ results='',
+ errors='unexpected state'
+ )
+ return res
+
+ @staticmethod
+ def has_yum():
+ return HAS_YUM_PYTHON
+
+ def run(self):
+ """
+ actually execute the module code backend
+ """
+
+ if (not HAS_RPM_PYTHON or not HAS_YUM_PYTHON) and sys.executable != '/usr/bin/python' and not has_respawned():
+ respawn_module('/usr/bin/python')
+ # end of the line for this process; we'll exit here once the respawned module has completed
+
+ error_msgs = []
+ if not HAS_RPM_PYTHON:
+ error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
+ if not HAS_YUM_PYTHON:
+ error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
+
+ self.wait_for_lock()
+
+ if error_msgs:
+ self.module.fail_json(msg='. '.join(error_msgs))
+
+ # Fedora will redirect yum to dnf, which has incompatibilities
+ # with how this module expects yum to operate. If yum-deprecated
+ # is available, use that instead to emulate the old behaviors.
+ if self.module.get_bin_path('yum-deprecated'):
+ yumbin = self.module.get_bin_path('yum-deprecated')
+ else:
+ yumbin = self.module.get_bin_path('yum')
+
+ # need debug level 2 to get 'Nothing to do' for groupinstall.
+ self.yum_basecmd = [yumbin, '-d', '2', '-y']
+
+ if self.update_cache and not self.names and not self.list:
+ rc, stdout, stderr = self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
+ if rc == 0:
+ self.module.exit_json(
+ changed=False,
+ msg="Cache updated",
+ rc=rc,
+ results=[]
+ )
+ else:
+ self.module.fail_json(
+ msg="Failed to update cache",
+ rc=rc,
+ results=[stderr],
+ )
+
+ repoquerybin = self.module.get_bin_path('repoquery', required=False)
+
+ if self.install_repoquery and not repoquerybin and not self.module.check_mode:
+ yum_path = self.module.get_bin_path('yum')
+ if yum_path:
+ if self.releasever:
+ self.module.run_command('%s -y install yum-utils --releasever %s' % (yum_path, self.releasever))
+ else:
+ self.module.run_command('%s -y install yum-utils' % yum_path)
+ repoquerybin = self.module.get_bin_path('repoquery', required=False)
+
+ if self.list:
+ if not repoquerybin:
+ self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
+ results = {'results': self.list_stuff(repoquerybin, self.list)}
+ else:
+ # If rhn-plugin is installed and no rhn-certificate is available on
+ # the system then users will see an error message using the yum API.
+ # Use repoquery in those cases.
+
+ repoquery = None
+ try:
+ yum_plugins = self.yum_base.plugins._plugins
+ except AttributeError:
+ pass
+ else:
+ if 'rhnplugin' in yum_plugins:
+ if repoquerybin:
+ repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
+ if self.installroot != '/':
+ repoquery.extend(['--installroot', self.installroot])
+
+ if self.disable_excludes:
+ # repoquery does not support --disableexcludes,
+ # so make a temp copy of yum.conf and get rid of the 'exclude=' line there
+ try:
+ with open('/etc/yum.conf', 'r') as f:
+ content = f.readlines()
+
+ tmp_conf_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, delete=False)
+ self.module.add_cleanup_file(tmp_conf_file.name)
+
+ tmp_conf_file.writelines([c for c in content if not c.startswith("exclude=")])
+ tmp_conf_file.close()
+ except Exception as e:
+ self.module.fail_json(msg="Failure setting up repoquery: %s" % to_native(e))
+
+ repoquery.extend(['-c', tmp_conf_file.name])
+
+ results = self.ensure(repoquery)
+ if repoquery:
+ results['msg'] = '%s %s' % (
+ results.get('msg', ''),
+ 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
+ )
+
+ self.module.exit_json(**results)
+
+
+def main():
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])
+
+ module = AnsibleModule(
+ **yumdnf_argument_spec
+ )
+
+ module_implementation = YumModule(module)
+ module_implementation.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/yum_repository.py b/lib/ansible/modules/yum_repository.py
new file mode 100644
index 0000000..84a10b9
--- /dev/null
+++ b/lib/ansible/modules/yum_repository.py
@@ -0,0 +1,735 @@
+# encoding: utf-8
+
+# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yum_repository
+author: Jiri Tyr (@jtyr)
+version_added: '2.1'
+short_description: Add or remove YUM repositories
+description:
+ - Add or remove YUM repositories in RPM-based Linux distributions.
+ - If you wish to update an existing repository definition use M(community.general.ini_file) instead.
+
+options:
+ async:
+ description:
+ - If set to C(true) Yum will download packages and metadata from this
+ repo in parallel, if possible.
+ - In ansible-core 2.11, 2.12, and 2.13 the default value is C(true).
+ - This option has been deprecated in RHEL 8. If you're using one of the
+ versions listed above, you can set this option to None to avoid passing an
+ unknown configuration option.
+ type: bool
+ bandwidth:
+ description:
+ - Maximum available network bandwidth in bytes/second. Used with the
+ I(throttle) option.
+ - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
+ throttling will be disabled. If I(throttle) is expressed as a data rate
+ (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
+ throttling).
+ type: str
+ default: '0'
+ baseurl:
+ description:
+ - URL to the directory where the yum repository's 'repodata' directory
+ lives.
+ - It can also be a list of multiple URLs.
+ - Either this parameter, I(metalink) or I(mirrorlist) is required if I(state) is set to
+ C(present).
+ type: list
+ elements: str
+ cost:
+ description:
+ - Relative cost of accessing this repository. Useful for weighing one
+ repo's packages as greater/less than any other.
+ type: str
+ default: '1000'
+ deltarpm_metadata_percentage:
+ description:
+ - When the relative size of deltarpm metadata vs pkgs is larger than
+ this, deltarpm metadata is not downloaded from the repo. Note that you
+ can give values over C(100), so C(200) means that the metadata is
+ required to be half the size of the packages. Use C(0) to turn off
+ this check, and always download metadata.
+ type: str
+ default: '100'
+ deltarpm_percentage:
+ description:
+ - When the relative size of delta vs pkg is larger than this, delta is
+ not used. Use C(0) to turn off delta rpm processing. Local repositories
+ (with file:// I(baseurl)) have delta rpms turned off by default.
+ type: str
+ default: '75'
+ description:
+ description:
+ - A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
+ - This parameter is only required if I(state) is set to C(present).
+ type: str
+ enabled:
+ description:
+ - This tells yum whether or not to use this repository.
+ - Yum default value is C(true).
+ type: bool
+ enablegroups:
+ description:
+ - Determines whether yum will allow the use of package groups for this
+ repository.
+ - Yum default value is C(true).
+ type: bool
+ exclude:
+ description:
+ - List of packages to exclude from updates or installs. This should be a
+ space separated list. Shell globs using wildcards (e.g. C(*) and C(?))
+ are allowed.
+ - The list can also be a regular YAML array.
+ type: list
+ elements: str
+ failovermethod:
+ choices: [roundrobin, priority]
+ default: roundrobin
+ description:
+ - C(roundrobin) randomly selects a URL out of the list of URLs to start
+ with and proceeds through each of them as it encounters a failure
+ contacting the host.
+ - C(priority) starts from the first I(baseurl) listed and reads through
+ them sequentially.
+ type: str
+ file:
+ description:
+ - File name without the C(.repo) extension to save the repo in. Defaults
+ to the value of I(name).
+ type: str
+ gpgcakey:
+ description:
+ - A URL pointing to the ASCII-armored CA key file for the repository.
+ type: str
+ gpgcheck:
+ description:
+ - Tells yum whether or not it should perform a GPG signature check on
+ packages.
+ - No default setting. If the value is not set, the system setting from
+ C(/etc/yum.conf) or system default of C(false) will be used.
+ type: bool
+ gpgkey:
+ description:
+ - A URL pointing to the ASCII-armored GPG key file for the repository.
+ - It can also be a list of multiple URLs.
+ type: list
+ elements: str
+ module_hotfixes:
+ description:
+ - Disable module RPM filtering and make all RPMs from the repository
+ available. The default is C(None).
+ version_added: '2.11'
+ type: bool
+ http_caching:
+ description:
+ - Determines how upstream HTTP caches are instructed to handle any HTTP
+ downloads that Yum does.
+ - C(all) means that all HTTP downloads should be cached.
+ - C(packages) means that only RPM package downloads should be cached (but
+ not repository metadata downloads).
+ - C(none) means that no HTTP downloads should be cached.
+ choices: [all, packages, none]
+ type: str
+ default: all
+ include:
+ description:
+ - Include an external configuration file. Both a local path and a URL are
+ supported. The configuration file will be inserted at the position of the
+ I(include=) line. Included files may contain further include lines.
+ Yum will abort with an error if an inclusion loop is detected.
+ type: str
+ includepkgs:
+ description:
+ - List of packages you want to use exclusively from this repository. This should be
+ a space separated list. Shell globs using wildcards (e.g. C(*) and C(?))
+ are allowed. Substitution variables (e.g. C($releasever)) are honored
+ here.
+ - The list can also be a regular YAML array.
+ type: list
+ elements: str
+ ip_resolve:
+ description:
+ - Determines how yum resolves host names.
+ - C(4) or C(IPv4) - resolve to IPv4 addresses only.
+ - C(6) or C(IPv6) - resolve to IPv6 addresses only.
+ - C(whatever) - resolve to either address family.
+ choices: ['4', '6', IPv4, IPv6, whatever]
+ type: str
+ default: whatever
+ keepalive:
+ description:
+ - This tells yum whether or not HTTP/1.1 keepalive should be used with
+ this repository. This can improve transfer speeds by using one
+ connection when downloading multiple files from a repository.
+ type: bool
+ default: 'no'
+ keepcache:
+ description:
+ - Either C(1) or C(0). Determines whether or not yum keeps the cache of
+ headers and packages after successful installation.
+ choices: ['0', '1']
+ type: str
+ default: '1'
+ metadata_expire:
+ description:
+ - Time (in seconds) after which the metadata will expire.
+ - Default value is 6 hours.
+ type: str
+ default: '21600'
+ metadata_expire_filter:
+ description:
+ - Filter the I(metadata_expire) time, allowing a trade of speed for
+ accuracy if a command doesn't require it. Each yum command can specify
+ that it requires a certain level of timeliness quality from the remote
+ repos, from "I'm about to install/upgrade, so this better be current"
+ to "Anything that's available is good enough".
+ - C(never) - Nothing is filtered, always obey I(metadata_expire).
+ - C(read-only:past) - Commands that only care about past information are
+ filtered from metadata expiring. Eg. I(yum history) info (if history
+ needs to lookup anything about a previous transaction, then by
+ definition the remote package was available in the past).
+ - C(read-only:present) - Commands that are balanced between past and
+ future. Eg. I(yum list yum).
+ - C(read-only:future) - Commands that are likely to result in running
+ other commands which will require the latest metadata. Eg.
+ I(yum check-update).
+ - Note that this option does not override "yum clean expire-cache".
+ choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
+ type: str
+ default: 'read-only:present'
+ metalink:
+ description:
+ - Specifies a URL to a metalink file for the repomd.xml; a list of
+ mirrors for the entire repository is generated by converting the
+ mirrors for the repomd.xml file to a I(baseurl).
+ - Either this parameter, I(baseurl) or I(mirrorlist) is required if I(state) is set to
+ C(present).
+ type: str
+ mirrorlist:
+ description:
+ - Specifies a URL to a file containing a list of baseurls.
+ - Either this parameter, I(baseurl) or I(metalink) is required if I(state) is set to
+ C(present).
+ type: str
+ mirrorlist_expire:
+ description:
+ - Time (in seconds) after which the locally cached mirrorlist will
+ expire.
+ - Default value is 6 hours.
+ type: str
+ default: '21600'
+ name:
+ description:
+ - Unique repository ID. This option builds the section name of the repository in the repo file.
+ - This parameter is only required if I(state) is set to C(present) or
+ C(absent).
+ type: str
+ required: true
+ password:
+ description:
+ - Password to use with the username for basic authentication.
+ type: str
+ priority:
+ description:
+ - Enforce ordered protection of repositories. The value is an integer
+ from 1 to 99.
+ - This option only works if the YUM Priorities plugin is installed.
+ type: str
+ default: '99'
+ protect:
+ description:
+ - Protect packages from updates from other repositories.
+ type: bool
+ default: 'no'
+ proxy:
+ description:
+ - URL to the proxy server that yum should use. Set to C(_none_) to
+ disable the global proxy setting.
+ type: str
+ proxy_password:
+ description:
+ - Password for this proxy.
+ type: str
+ proxy_username:
+ description:
+ - Username to use for proxy.
+ type: str
+ repo_gpgcheck:
+ description:
+ - This tells yum whether or not it should perform a GPG signature check
+ on the repodata from this repository.
+ type: bool
+ default: 'no'
+ reposdir:
+ description:
+ - Directory where the C(.repo) files will be stored.
+ type: path
+ default: /etc/yum.repos.d
+ retries:
+ description:
+ - Set the number of times any attempt to retrieve a file should retry
+ before returning an error. Setting this to C(0) makes yum try forever.
+ type: str
+ default: '10'
+ s3_enabled:
+ description:
+ - Enables support for S3 repositories.
+ - This option only works if the YUM S3 plugin is installed.
+ type: bool
+ default: 'no'
+ skip_if_unavailable:
+ description:
+ - If set to C(true) yum will continue running if this repository cannot be
+ contacted for any reason. This should be set carefully as all repos are
+ consulted for any given command.
+ type: bool
+ default: 'no'
+ ssl_check_cert_permissions:
+ description:
+ - Whether yum should check the permissions on the paths for the
+ certificates on the repository (both remote and local).
+ - If we can't read any of the files then yum will force
+ I(skip_if_unavailable) to be C(true). This is most useful for non-root
+ processes which use yum on repos that have client cert files which are
+ readable only by root.
+ type: bool
+ default: 'no'
+ sslcacert:
+ description:
+ - Path to the directory containing the databases of the certificate
+ authorities yum should use to verify SSL certificates.
+ type: str
+ aliases: [ ca_cert ]
+ sslclientcert:
+ description:
+ - Path to the SSL client certificate yum should use to connect to
+ repos/remote sites.
+ type: str
+ aliases: [ client_cert ]
+ sslclientkey:
+ description:
+ - Path to the SSL client key yum should use to connect to repos/remote
+ sites.
+ type: str
+ aliases: [ client_key ]
+ sslverify:
+ description:
+ - Defines whether yum should verify SSL certificates/hosts at all.
+ type: bool
+ default: 'yes'
+ aliases: [ validate_certs ]
+ state:
+ description:
+ - State of the repo file.
+ choices: [absent, present]
+ type: str
+ default: present
+ throttle:
+ description:
+ - Enable bandwidth throttling for downloads.
+ - This option can be expressed as an absolute data rate in bytes/sec. An
+ SI prefix (k, M or G) may be appended to the bandwidth value.
+ type: str
+ timeout:
+ description:
+ - Number of seconds to wait for a connection before timing out.
+ type: str
+ default: '30'
+ ui_repoid_vars:
+ description:
+ - When a repository id is displayed, append these yum variables to the
+ string if they are used in the I(baseurl)/etc. Variables are appended
+ in the order listed (and found).
+ type: str
+ default: releasever basearch
+ username:
+ description:
+ - Username to use for basic authentication to a repo or really any URL.
+ type: str
+
+extends_documentation_fragment:
+ - action_common_attributes
+ - files
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: rhel
+notes:
+ - All comments will be removed if modifying an existing repo file.
+ - Section order is preserved in an existing repo file.
+ - Parameters in a section are ordered alphabetically in an existing repo
+ file.
+ - The repo file will be automatically deleted if it contains no repository.
+ - When removing a repository, beware that the metadata cache may still remain
+ on disk until you run C(yum clean all). Use a notification handler for this.
+ - "The C(params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
+ handling"
+'''
+
+EXAMPLES = '''
+- name: Add repository
+ ansible.builtin.yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+
+- name: Add multiple repositories into the same file (1/2)
+ ansible.builtin.yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ file: external_repos
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ gpgcheck: no
+
+- name: Add multiple repositories into the same file (2/2)
+ ansible.builtin.yum_repository:
+ name: rpmforge
+ description: RPMforge YUM repo
+ file: external_repos
+ baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
+ mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
+ enabled: no
+
+# Handler showing how to clean yum metadata cache
+- name: yum-clean-metadata
+ ansible.builtin.command: yum clean metadata
+
+# Example removing a repository and cleaning up metadata cache
+- name: Remove repository (and clean up left-over metadata)
+ ansible.builtin.yum_repository:
+ name: epel
+ state: absent
+ notify: yum-clean-metadata
+
+- name: Remove repository from a specific repo file
+ ansible.builtin.yum_repository:
+ name: epel
+ file: external_repos
+ state: absent
+'''
+
+RETURN = '''
+repo:
+ description: repository name
+ returned: success
+ type: str
+ sample: "epel"
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+class YumRepo(object):
+ # Class global variables
+ module = None
+ params = None
+ section = None
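+ # RawConfigParser is used so '%' characters in values (e.g. proxy URLs) are never interpolated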
+ repofile = configparser.RawConfigParser()
+
+ # List of parameters which will be allowed in the repo file output
+ allowed_params = [
+ 'async',
+ 'bandwidth',
+ 'baseurl',
+ 'cost',
+ 'deltarpm_metadata_percentage',
+ 'deltarpm_percentage',
+ 'enabled',
+ 'enablegroups',
+ 'exclude',
+ 'failovermethod',
+ 'gpgcakey',
+ 'gpgcheck',
+ 'gpgkey',
+ 'module_hotfixes',
+ 'http_caching',
+ 'include',
+ 'includepkgs',
+ 'ip_resolve',
+ 'keepalive',
+ 'keepcache',
+ 'metadata_expire',
+ 'metadata_expire_filter',
+ 'metalink',
+ 'mirrorlist',
+ 'mirrorlist_expire',
+ 'name',
+ 'password',
+ 'priority',
+ 'protect',
+ 'proxy',
+ 'proxy_password',
+ 'proxy_username',
+ 'repo_gpgcheck',
+ 'retries',
+ 's3_enabled',
+ 'skip_if_unavailable',
+ 'sslcacert',
+ 'ssl_check_cert_permissions',
+ 'sslclientcert',
+ 'sslclientkey',
+ 'sslverify',
+ 'throttle',
+ 'timeout',
+ 'ui_repoid_vars',
+ 'username']
+
+ # List of parameters which can be a list
+ list_params = ['exclude', 'includepkgs']
+
+ def __init__(self, module):
+ # To be able to use fail_json
+ self.module = module
+ # Shortcut for the params
+ self.params = self.module.params
+ # Section is always the repoid
+ self.section = self.params['repoid']
+
+ # Check if repo directory exists
+ repos_dir = self.params['reposdir']
+ if not os.path.isdir(repos_dir):
+ self.module.fail_json(
+ msg="Repo directory '%s' does not exist." % repos_dir)
+
+ # Set dest; also used to set dest parameter for the FS attributes
+ self.params['dest'] = os.path.join(
+ repos_dir, "%s.repo" % self.params['file'])
+
+ # Read the repo file if it exists
+ if os.path.isfile(self.params['dest']):
+ self.repofile.read(self.params['dest'])
+
+ def add(self):
+ # Remove already existing repo and create a new one
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ # Add section
+ self.repofile.add_section(self.section)
+
+ # Baseurl/metalink/mirrorlist is not required for removal, where only
+ # the repo name is needed. This is why we check here whether one of
+ # them is defined.
+ req_params = (self.params['baseurl'], self.params['metalink'], self.params['mirrorlist'])
+ if req_params == (None, None, None):
+ self.module.fail_json(
+ msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required for "
+ "adding a new repo.")
+
+ # Set options
+ for key, value in sorted(self.params.items()):
+ if key in self.list_params and isinstance(value, list):
+ # Join items into one string for specific parameters
+ value = ' '.join(value)
+ elif isinstance(value, bool):
+ # Convert boolean value to integer
+ value = int(value)
+
+ # Set the value only if it was defined (default is None)
+ if value is not None and key in self.allowed_params:
+ self.repofile.set(self.section, key, value)
+
+ def save(self):
+ if self.repofile.sections():
+ # Write data into the file
+ try:
+ with open(self.params['dest'], 'w') as fd:
+ self.repofile.write(fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Problems handling file %s." % self.params['dest'],
+ details=to_native(e))
+ else:
+ # Remove the file if there are no repos left
+ try:
+ os.remove(self.params['dest'])
+ except OSError as e:
+ self.module.fail_json(
+ msg=(
+ "Cannot remove empty repo file %s." %
+ self.params['dest']),
+ details=to_native(e))
+
+ def remove(self):
+ # Remove the section if it exists
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ def dump(self):
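+ # render the in-memory repo file as text; used to build the before/after diff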
+ repo_string = ""
+
+ # Compose the repo file
+ for section in sorted(self.repofile.sections()):
+ repo_string += "[%s]\n" % section
+
+ for key, value in sorted(self.repofile.items(section)):
+ repo_string += "%s = %s\n" % (key, value)
+
+ repo_string += "\n"
+
+ return repo_string
+
+
+def main():
+ # Module settings
+ argument_spec = dict(
+ bandwidth=dict(),
+ baseurl=dict(type='list', elements='str'),
+ cost=dict(),
+ deltarpm_metadata_percentage=dict(),
+ deltarpm_percentage=dict(),
+ description=dict(),
+ enabled=dict(type='bool'),
+ enablegroups=dict(type='bool'),
+ exclude=dict(type='list', elements='str'),
+ failovermethod=dict(choices=['roundrobin', 'priority']),
+ file=dict(),
+ gpgcakey=dict(no_log=False),
+ gpgcheck=dict(type='bool'),
+ gpgkey=dict(type='list', elements='str', no_log=False),
+ module_hotfixes=dict(type='bool'),
+ http_caching=dict(choices=['all', 'packages', 'none']),
+ include=dict(),
+ includepkgs=dict(type='list', elements='str'),
+ ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
+ keepalive=dict(type='bool'),
+ keepcache=dict(choices=['0', '1']),
+ metadata_expire=dict(),
+ metadata_expire_filter=dict(
+ choices=[
+ 'never',
+ 'read-only:past',
+ 'read-only:present',
+ 'read-only:future']),
+ metalink=dict(),
+ mirrorlist=dict(),
+ mirrorlist_expire=dict(),
+ name=dict(required=True),
+ params=dict(type='dict'),
+ password=dict(no_log=True),
+ priority=dict(),
+ protect=dict(type='bool'),
+ proxy=dict(),
+ proxy_password=dict(no_log=True),
+ proxy_username=dict(),
+ repo_gpgcheck=dict(type='bool'),
+ reposdir=dict(default='/etc/yum.repos.d', type='path'),
+ retries=dict(),
+ s3_enabled=dict(type='bool'),
+ skip_if_unavailable=dict(type='bool'),
+ sslcacert=dict(aliases=['ca_cert']),
+ ssl_check_cert_permissions=dict(type='bool'),
+ sslclientcert=dict(aliases=['client_cert']),
+ sslclientkey=dict(aliases=['client_key'], no_log=False),
+ sslverify=dict(type='bool', aliases=['validate_certs']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ throttle=dict(),
+ timeout=dict(),
+ ui_repoid_vars=dict(),
+ username=dict(),
+ )
+
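+ # 'async' is a reserved keyword in recent Python versions, so it cannot be passed to dict() above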
+ argument_spec['async'] = dict(type='bool')
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Params was removed
+ # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
+ if module.params['params']:
+ module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # Check if required parameters are present
+ if state == 'present':
+ if (
+ module.params['baseurl'] is None and
+ module.params['metalink'] is None and
+ module.params['mirrorlist'] is None):
+ module.fail_json(
+ msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
+ if module.params['description'] is None:
+ module.fail_json(
+ msg="Parameter 'description' is required.")
+
+ # Rename "name" and "description" to ensure correct key sorting
+ module.params['repoid'] = module.params['name']
+ module.params['name'] = module.params['description']
+ del module.params['description']
+
+ # Change list type to string for baseurl and gpgkey
+ for list_param in ['baseurl', 'gpgkey']:
+ if (
+ list_param in module.params and
+ module.params[list_param] is not None):
+ module.params[list_param] = "\n".join(module.params[list_param])
+
+ # Define repo file name if it doesn't exist
+ if module.params['file'] is None:
+ module.params['file'] = module.params['repoid']
+
+ # Instantiate the YumRepo object
+ yumrepo = YumRepo(module)
+
+ # Get repo status before change
+ diff = {
+ 'before_header': yumrepo.params['dest'],
+ 'before': yumrepo.dump(),
+ 'after_header': yumrepo.params['dest'],
+ 'after': ''
+ }
+
+ # Perform action depending on the state
+ if state == 'present':
+ yumrepo.add()
+ elif state == 'absent':
+ yumrepo.remove()
+
+ # Get repo status after change
+ diff['after'] = yumrepo.dump()
+
+ # Compare repo states
+ changed = diff['before'] != diff['after']
+
+ # Save the file only if not in check mode and if there was a change
+ if not module.check_mode and changed:
+ yumrepo.save()
+
+ # Change file attributes if needed
+ if os.path.isfile(module.params['dest']):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Print status of the change
+ module.exit_json(changed=changed, repo=name, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()