diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 16:04:21 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 16:04:21 +0000 |
commit | 8a754e0858d922e955e71b253c139e071ecec432 (patch) | |
tree | 527d16e74bfd1840c85efd675fdecad056c54107 /lib/ansible/plugins/action | |
parent | Initial commit. (diff) | |
download | ansible-core-upstream.tar.xz ansible-core-upstream.zip |
Adding upstream version 2.14.3. (tag: upstream/2.14.3, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib/ansible/plugins/action')
29 files changed, 5426 insertions, 0 deletions
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py new file mode 100644 index 0000000..d199207 --- /dev/null +++ b/lib/ansible/plugins/action/__init__.py @@ -0,0 +1,1431 @@ +# coding: utf-8 +# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import base64 +import json +import os +import random +import re +import shlex +import stat +import tempfile + +from abc import ABC, abstractmethod +from collections.abc import Sequence + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsibleAuthenticationFailure +from ansible.executor.module_common import modify_module +from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError +from ansible.module_utils.common.arg_spec import ArgumentSpecValidator +from ansible.module_utils.errors import UnsupportedError +from ansible.module_utils.json_utils import _filter_non_json_lines +from ansible.module_utils.six import binary_type, string_types, text_type +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.parsing.utils.jsonify import jsonify +from ansible.release import __version__ +from ansible.utils.collection_loader import resource_from_fqcr +from ansible.utils.display import Display +from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText +from ansible.vars.clean import remove_internal_keys +from ansible.utils.plugin_docs import get_versioned_doclink + +display = Display() + + +class ActionBase(ABC): + + ''' + This class is the base class for all action plugins, and defines + code common to all actions. 
The base class handles the connection + by putting/getting files and executing commands based on the current + action in use. + ''' + + # A set of valid arguments + _VALID_ARGS = frozenset([]) # type: frozenset[str] + + def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj): + self._task = task + self._connection = connection + self._play_context = play_context + self._loader = loader + self._templar = templar + self._shared_loader_obj = shared_loader_obj + self._cleanup_remote_tmp = False + + self._supports_check_mode = True + self._supports_async = False + + # interpreter discovery state + self._discovered_interpreter_key = None + self._discovered_interpreter = False + self._discovery_deprecation_warnings = [] + self._discovery_warnings = [] + + # Backwards compat: self._display isn't really needed, just import the global display and use that. + self._display = display + + self._used_interpreter = None + + @abstractmethod + def run(self, tmp=None, task_vars=None): + """ Action Plugins should implement this method to perform their + tasks. Everything else in this base class is a helper method for the + action plugin to do that. + + :kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls + another one and wants to use the same remote tmp for both should set + self._connection._shell.tmpdir rather than this parameter. + :kwarg task_vars: The variables (host vars, group vars, config vars, + etc) associated with this task. + :returns: dictionary of results from the module + + Implementors of action modules may find the following variables especially useful: + + * Module parameters. These are stored in self._task.args + """ + + # does not default to {'changed': False, 'failed': False}, as it breaks async + result = {} + + if tmp is not None: + result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. 
Action' + ' plugins should set self._connection._shell.tmpdir to share' + ' the tmpdir'] + del tmp + + if self._task.async_val and not self._supports_async: + raise AnsibleActionFail('async is not supported for this task.') + elif self._task.check_mode and not self._supports_check_mode: + raise AnsibleActionSkip('check mode is not supported for this task.') + elif self._task.async_val and self._task.check_mode: + raise AnsibleActionFail('check mode and async cannot be used on same task.') + + # Error if invalid argument is passed + if self._VALID_ARGS: + task_opts = frozenset(self._task.args.keys()) + bad_opts = task_opts.difference(self._VALID_ARGS) + if bad_opts: + raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts)))) + + if self._connection._shell.tmpdir is None and self._early_needs_tmp_path(): + self._make_tmp_path() + + return result + + def validate_argument_spec(self, argument_spec=None, + mutually_exclusive=None, + required_together=None, + required_one_of=None, + required_if=None, + required_by=None, + ): + """Validate an argument spec against the task args + + This will return a tuple of (ValidationResult, dict) where the dict + is the validated, coerced, and normalized task args. + + Be cautious when directly passing ``new_module_args`` directly to a + module invocation, as it will contain the defaults, and not only + the args supplied from the task. If you do this, the module + should not define ``mututally_exclusive`` or similar. + + This code is roughly copied from the ``validate_argument_spec`` + action plugin for use by other action plugins. 
+ """ + + new_module_args = self._task.args.copy() + + validator = ArgumentSpecValidator( + argument_spec, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + required_if=required_if, + required_by=required_by, + ) + validation_result = validator.validate(new_module_args) + + new_module_args.update(validation_result.validated_parameters) + + try: + error = validation_result.errors[0] + except IndexError: + error = None + + # Fail for validation errors, even in check mode + if error: + msg = validation_result.errors.msg + if isinstance(error, UnsupportedError): + msg = f"Unsupported parameters for ({self._load_name}) module: {msg}" + + raise AnsibleActionFail(msg) + + return validation_result, new_module_args + + def cleanup(self, force=False): + """Method to perform a clean up at the end of an action plugin execution + + By default this is designed to clean up the shell tmpdir, and is toggled based on whether + async is in use + + Action plugins may override this if they deem necessary, but should still call this method + via super + """ + if force or not self._task.async_val: + self._remove_tmp_path(self._connection._shell.tmpdir) + + def get_plugin_option(self, plugin, option, default=None): + """Helper to get an option from a plugin without having to use + the try/except dance everywhere to set a default + """ + try: + return plugin.get_option(option) + except (AttributeError, KeyError): + return default + + def get_become_option(self, option, default=None): + return self.get_plugin_option(self._connection.become, option, default=default) + + def get_connection_option(self, option, default=None): + return self.get_plugin_option(self._connection, option, default=default) + + def get_shell_option(self, option, default=None): + return self.get_plugin_option(self._connection._shell, option, default=default) + + def _remote_file_exists(self, path): + cmd = self._connection._shell.exists(path) + result = 
self._low_level_execute_command(cmd=cmd, sudoable=True) + if result['rc'] == 0: + return True + return False + + def _configure_module(self, module_name, module_args, task_vars): + ''' + Handles the loading and templating of the module code through the + modify_module() function. + ''' + if self._task.delegate_to: + use_vars = task_vars.get('ansible_delegated_vars')[self._task.delegate_to] + else: + use_vars = task_vars + + split_module_name = module_name.split('.') + collection_name = '.'.join(split_module_name[0:2]) if len(split_module_name) > 2 else '' + leaf_module_name = resource_from_fqcr(module_name) + + # Search module path(s) for named module. + for mod_type in self._connection.module_implementation_preferences: + # Check to determine if PowerShell modules are supported, and apply + # some fixes (hacks) to module name + args. + if mod_type == '.ps1': + # FIXME: This should be temporary and moved to an exec subsystem plugin where we can define the mapping + # for each subsystem. + win_collection = 'ansible.windows' + rewrite_collection_names = ['ansible.builtin', 'ansible.legacy', ''] + # async_status, win_stat, win_file, win_copy, and win_ping are not just like their + # python counterparts but they are compatible enough for our + # internal usage + # NB: we only rewrite the module if it's not being called by the user (eg, an action calling something else) + # and if it's unqualified or FQ to a builtin + if leaf_module_name in ('stat', 'file', 'copy', 'ping') and \ + collection_name in rewrite_collection_names and self._task.action != module_name: + module_name = '%s.win_%s' % (win_collection, leaf_module_name) + elif leaf_module_name == 'async_status' and collection_name in rewrite_collection_names: + module_name = '%s.%s' % (win_collection, leaf_module_name) + + # TODO: move this tweak down to the modules, not extensible here + # Remove extra quotes surrounding path parameters before sending to module. 
+ if leaf_module_name in ['win_stat', 'win_file', 'win_copy', 'slurp'] and module_args and \ + hasattr(self._connection._shell, '_unquote'): + for key in ('src', 'dest', 'path'): + if key in module_args: + module_args[key] = self._connection._shell._unquote(module_args[key]) + + result = self._shared_loader_obj.module_loader.find_plugin_with_context(module_name, mod_type, collection_list=self._task.collections) + + if not result.resolved: + if result.redirect_list and len(result.redirect_list) > 1: + # take the last one in the redirect list, we may have successfully jumped through N other redirects + target_module_name = result.redirect_list[-1] + + raise AnsibleError("The module {0} was redirected to {1}, which could not be loaded.".format(module_name, target_module_name)) + + module_path = result.plugin_resolved_path + if module_path: + break + else: # This is a for-else: http://bit.ly/1ElPkyg + raise AnsibleError("The module %s was not found in configured module paths" % (module_name)) + + # insert shared code and arguments into the module + final_environment = dict() + self._compute_environment_string(final_environment) + + become_kwargs = {} + if self._connection.become: + become_kwargs['become'] = True + become_kwargs['become_method'] = self._connection.become.name + become_kwargs['become_user'] = self._connection.become.get_option('become_user', + playcontext=self._play_context) + become_kwargs['become_password'] = self._connection.become.get_option('become_pass', + playcontext=self._play_context) + become_kwargs['become_flags'] = self._connection.become.get_option('become_flags', + playcontext=self._play_context) + + # modify_module will exit early if interpreter discovery is required; re-run after if necessary + for dummy in (1, 2): + try: + (module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar, + task_vars=use_vars, + module_compression=self._play_context.module_compression, + 
async_timeout=self._task.async_val, + environment=final_environment, + remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)), + **become_kwargs) + break + except InterpreterDiscoveryRequiredError as idre: + self._discovered_interpreter = AnsibleUnsafeText(discover_interpreter( + action=self, + interpreter_name=idre.interpreter_name, + discovery_mode=idre.discovery_mode, + task_vars=use_vars)) + + # update the local task_vars with the discovered interpreter (which might be None); + # we'll propagate back to the controller in the task result + discovered_key = 'discovered_interpreter_%s' % idre.interpreter_name + + # update the local vars copy for the retry + use_vars['ansible_facts'][discovered_key] = self._discovered_interpreter + + # TODO: this condition prevents 'wrong host' from being updated + # but in future we would want to be able to update 'delegated host facts' + # irrespective of task settings + if not self._task.delegate_to or self._task.delegate_facts: + # store in local task_vars facts collection for the retry and any other usages in this worker + task_vars['ansible_facts'][discovered_key] = self._discovered_interpreter + # preserve this so _execute_module can propagate back to controller as a fact + self._discovered_interpreter_key = discovered_key + else: + task_vars['ansible_delegated_vars'][self._task.delegate_to]['ansible_facts'][discovered_key] = self._discovered_interpreter + + return (module_style, module_shebang, module_data, module_path) + + def _compute_environment_string(self, raw_environment_out=None): + ''' + Builds the environment string to be used when executing the remote task. 
+ ''' + + final_environment = dict() + if self._task.environment is not None: + environments = self._task.environment + if not isinstance(environments, list): + environments = [environments] + + # The order of environments matters to make sure we merge + # in the parent's values first so those in the block then + # task 'win' in precedence + for environment in environments: + if environment is None or len(environment) == 0: + continue + temp_environment = self._templar.template(environment) + if not isinstance(temp_environment, dict): + raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment))) + # very deliberately using update here instead of combine_vars, as + # these environment settings should not need to merge sub-dicts + final_environment.update(temp_environment) + + if len(final_environment) > 0: + final_environment = self._templar.template(final_environment) + + if isinstance(raw_environment_out, dict): + raw_environment_out.clear() + raw_environment_out.update(final_environment) + + return self._connection._shell.env_prefix(**final_environment) + + def _early_needs_tmp_path(self): + ''' + Determines if a tmp path should be created before the action is executed. 
+ ''' + + return getattr(self, 'TRANSFERS_FILES', False) + + def _is_pipelining_enabled(self, module_style, wrap_async=False): + ''' + Determines if we are required and can do pipelining + ''' + + try: + is_enabled = self._connection.get_option('pipelining') + except (KeyError, AttributeError, ValueError): + is_enabled = self._play_context.pipelining + + # winrm supports async pipeline + # TODO: make other class property 'has_async_pipelining' to separate cases + always_pipeline = self._connection.always_pipeline_modules + + # su does not work with pipelining + # TODO: add has_pipelining class prop to become plugins + become_exception = (self._connection.become.name if self._connection.become else '') != 'su' + + # any of these require a true + conditions = [ + self._connection.has_pipelining, # connection class supports it + is_enabled or always_pipeline, # enabled via config or forced via connection (eg winrm) + module_style == "new", # old style modules do not support pipelining + not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files + not wrap_async or always_pipeline, # async does not normally support pipelining unless it does (eg winrm) + become_exception, + ] + + return all(conditions) + + def _get_admin_users(self): + ''' + Returns a list of admin users that are configured for the current shell + plugin + ''' + + return self.get_shell_option('admin_users', ['root']) + + def _get_remote_addr(self, tvars): + ''' consistently get the 'remote_address' for the action plugin ''' + remote_addr = tvars.get('delegated_vars', {}).get('ansible_host', tvars.get('ansible_host', tvars.get('inventory_hostname', None))) + for variation in ('remote_addr', 'host'): + try: + remote_addr = self._connection.get_option(variation) + except KeyError: + continue + break + else: + # plugin does not have, fallback to play_context + remote_addr = self._play_context.remote_addr + return remote_addr + + def _get_remote_user(self): + ''' consistently get the 'remote_user' for the 
action plugin ''' + # TODO: use 'current user running ansible' as fallback when moving away from play_context + # pwd.getpwuid(os.getuid()).pw_name + remote_user = None + try: + remote_user = self._connection.get_option('remote_user') + except KeyError: + # plugin does not have remote_user option, fallback to default and/play_context + remote_user = getattr(self._connection, 'default_user', None) or self._play_context.remote_user + except AttributeError: + # plugin does not use config system, fallback to old play_context + remote_user = self._play_context.remote_user + return remote_user + + def _is_become_unprivileged(self): + ''' + The user is not the same as the connection user and is not part of the + shell configured admin users + ''' + # if we don't use become then we know we aren't switching to a + # different unprivileged user + if not self._connection.become: + return False + + # if we use become and the user is not an admin (or same user) then + # we need to return become_unprivileged as True + admin_users = self._get_admin_users() + remote_user = self._get_remote_user() + become_user = self.get_become_option('become_user') + return bool(become_user and become_user not in admin_users + [remote_user]) + + def _make_tmp_path(self, remote_user=None): + ''' + Create and return a temporary path on a remote box. + ''' + + # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host. 
+ # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user + # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7 + if getattr(self._connection, '_remote_is_local', False): + tmpdir = C.DEFAULT_LOCAL_TMP + else: + # NOTE: shell plugins should populate this setting anyways, but they dont do remote expansion, which + # we need for 'non posix' systems like cloud-init and solaris + tmpdir = self._remote_expand_user(self.get_shell_option('remote_tmp', default='~/.ansible/tmp'), sudoable=False) + + become_unprivileged = self._is_become_unprivileged() + basefile = self._connection._shell._generate_temp_dir_name() + cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir) + result = self._low_level_execute_command(cmd, sudoable=False) + + # error handling on this seems a little aggressive? + if result['rc'] != 0: + if result['rc'] == 5: + output = 'Authentication failure.' + elif result['rc'] == 255 and self._connection.transport in ('ssh',): + + if display.verbosity > 3: + output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr']) + else: + output = (u'SSH encountered an unknown error during the connection. ' + 'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue') + + elif u'No space left on device' in result['stderr']: + output = result['stderr'] + else: + output = ('Failed to create temporary directory. ' + 'In some cases, you may have been able to authenticate and did not have permissions on the target directory. ' + 'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp", for more error information use -vvv. 
' + 'Failed command was: %s, exited with result %d' % (cmd, result['rc'])) + if 'stdout' in result and result['stdout'] != u'': + output = output + u", stdout output: %s" % result['stdout'] + if display.verbosity > 3 and 'stderr' in result and result['stderr'] != u'': + output += u", stderr output: %s" % result['stderr'] + raise AnsibleConnectionFailure(output) + else: + self._cleanup_remote_tmp = True + + try: + stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1) + rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1] + except IndexError: + # stdout was empty or just space, set to / to trigger error in next if + rc = '/' + + # Catch failure conditions, files should never be + # written to locations in /. + if rc == '/': + raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd)) + + self._connection._shell.tmpdir = rc + + return rc + + def _should_remove_tmp_path(self, tmp_path): + '''Determine if temporary path should be deleted or kept by user request/config''' + return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path + + def _remove_tmp_path(self, tmp_path, force=False): + '''Remove a temporary path we created. ''' + + if tmp_path is None and self._connection._shell.tmpdir: + tmp_path = self._connection._shell.tmpdir + + if force or self._should_remove_tmp_path(tmp_path): + cmd = self._connection._shell.remove(tmp_path, recurse=True) + # If we have gotten here we have a working connection configuration. + # If the connection breaks we could leave tmp directories out on the remote system. 
+ tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False) + + if tmp_rm_res.get('rc', 0) != 0: + display.warning('Error deleting remote temporary files (rc: %s, stderr: %s})' + % (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.'))) + else: + self._connection._shell.tmpdir = None + + def _transfer_file(self, local_path, remote_path): + """ + Copy a file from the controller to a remote path + + :arg local_path: Path on controller to transfer + :arg remote_path: Path on the remote system to transfer into + + .. warning:: + * When you use this function you likely want to use use fixup_perms2() on the + remote_path to make sure that the remote file is readable when the user becomes + a non-privileged user. + * If you use fixup_perms2() on the file and copy or move the file into place, you will + need to then remove filesystem acls on the file once it has been copied into place by + the module. See how the copy module implements this for help. + """ + self._connection.put_file(local_path, remote_path) + return remote_path + + def _transfer_data(self, remote_path, data): + ''' + Copies the module data out to the temporary module path. + ''' + + if isinstance(data, dict): + data = jsonify(data) + + afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP) + afo = os.fdopen(afd, 'wb') + try: + data = to_bytes(data, errors='surrogate_or_strict') + afo.write(data) + except Exception as e: + raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e)) + + afo.flush() + afo.close() + + try: + self._transfer_file(afile, remote_path) + finally: + os.unlink(afile) + + return remote_path + + def _fixup_perms2(self, remote_paths, remote_user=None, execute=True): + """ + We need the files we upload to be readable (and sometimes executable) + by the user being sudo'd to but we want to limit other people's access + (because the files could contain passwords or other private + information. 
We achieve this in one of these ways: + + * If no sudo is performed or the remote_user is sudo'ing to + themselves, we don't have to change permissions. + * If the remote_user sudo's to a privileged user (for instance, root), + we don't have to change permissions + * If the remote_user sudo's to an unprivileged user then we attempt to + grant the unprivileged user access via file system acls. + * If granting file system acls fails we try to change the owner of the + file with chown which only works in case the remote_user is + privileged or the remote systems allows chown calls by unprivileged + users (e.g. HP-UX) + * If the above fails, we next try 'chmod +a' which is a macOS way of + setting ACLs on files. + * If the above fails, we check if ansible_common_remote_group is set. + If it is, we attempt to chgrp the file to its value. This is useful + if the remote_user has a group in common with the become_user. As the + remote_user, we can chgrp the file to that group and allow the + become_user to read it. + * If (the chown fails AND ansible_common_remote_group is not set) OR + (ansible_common_remote_group is set AND the chgrp (or following chmod) + returned non-zero), we can set the file to be world readable so that + the second unprivileged user can read the file. + Since this could allow other users to get access to private + information we only do this if ansible is configured with + "allow_world_readable_tmpfiles" in the ansible.cfg. Also note that + when ansible_common_remote_group is set this final fallback is very + unlikely to ever be triggered, so long as chgrp was successful. But + just because the chgrp was successful, does not mean Ansible can + necessarily access the files (if, for example, the variable was set + to a group that remote_user is in, and can chgrp to, but does not have + in common with become_user). + """ + if remote_user is None: + remote_user = self._get_remote_user() + + # Step 1: Are we on windows? 
+ if getattr(self._connection._shell, "_IS_WINDOWS", False): + # This won't work on Powershell as-is, so we'll just completely + # skip until we have a need for it, at which point we'll have to do + # something different. + return remote_paths + + # Step 2: If we're not becoming an unprivileged user, we are roughly + # done. Make the files +x if we're asked to, and return. + if not self._is_become_unprivileged(): + if execute: + # Can't depend on the file being transferred with execute permissions. + # Only need user perms because no become was used here + res = self._remote_chmod(remote_paths, 'u+x') + if res['rc'] != 0: + raise AnsibleError( + 'Failed to set execute bit on remote files ' + '(rc: {0}, err: {1})'.format( + res['rc'], + to_native(res['stderr']))) + return remote_paths + + # If we're still here, we have an unprivileged user that's different + # than the ssh user. + become_user = self.get_become_option('become_user') + + # Try to use file system acls to make the files readable for sudo'd + # user + if execute: + chmod_mode = 'rx' + setfacl_mode = 'r-x' + # Apple patches their "file_cmds" chmod with ACL support + chmod_acl_mode = '{0} allow read,execute'.format(become_user) + # POSIX-draft ACL specification. Solaris, maybe others. + # See chmod(1) on something Solaris-based for syntax details. + posix_acl_mode = 'A+user:{0}:rx:allow'.format(become_user) + else: + chmod_mode = 'rX' + # TODO: this form fails silently on freebsd. We currently + # never call _fixup_perms2() with execute=False but if we + # start to we'll have to fix this. + setfacl_mode = 'r-X' + # Apple + chmod_acl_mode = '{0} allow read'.format(become_user) + # POSIX-draft + posix_acl_mode = 'A+user:{0}:r:allow'.format(become_user) + + # Step 3a: Are we able to use setfacl to add user ACLs to the file? + res = self._remote_set_user_facl( + remote_paths, + become_user, + setfacl_mode) + + if res['rc'] == 0: + return remote_paths + + # Step 3b: Set execute if we need to. 
We do this before anything else + # because some of the methods below might work but not let us set +x + # as part of them. + if execute: + res = self._remote_chmod(remote_paths, 'u+x') + if res['rc'] != 0: + raise AnsibleError( + 'Failed to set file mode or acl on remote temporary files ' + '(rc: {0}, err: {1})'.format( + res['rc'], + to_native(res['stderr']))) + + # Step 3c: File system ACLs failed above; try falling back to chown. + res = self._remote_chown(remote_paths, become_user) + if res['rc'] == 0: + return remote_paths + + # Check if we are an admin/root user. If we are and got here, it means + # we failed to chown as root and something weird has happened. + if remote_user in self._get_admin_users(): + raise AnsibleError( + 'Failed to change ownership of the temporary files Ansible ' + '(via chmod nor setfacl) needs to create despite connecting as a ' + 'privileged user. Unprivileged become user would be unable to read' + ' the file.') + + # Step 3d: Try macOS's special chmod + ACL + # macOS chmod's +a flag takes its own argument. As a slight hack, we + # pass that argument as the first element of remote_paths. So we end + # up running `chmod +a [that argument] [file 1] [file 2] ...` + try: + res = self._remote_chmod([chmod_acl_mode] + list(remote_paths), '+a') + except AnsibleAuthenticationFailure as e: + # Solaris-based chmod will return 5 when it sees an invalid mode, + # and +a is invalid there. Because it returns 5, which is the same + # thing sshpass returns on auth failure, our sshpass code will + # assume that auth failed. If we don't handle that case here, none + # of the other logic below will get run. This is fairly hacky and a + # corner case, but probably one that shows up pretty often in + # Solaris-based environments (and possibly others). 
+ pass + else: + if res['rc'] == 0: + return remote_paths + + # Step 3e: Try Solaris/OpenSolaris/OpenIndiana-sans-setfacl chmod + # Similar to macOS above, Solaris 11.4 drops setfacl and takes file ACLs + # via chmod instead. OpenSolaris and illumos-based distros allow for + # using either setfacl or chmod, and compatibility depends on filesystem. + # It should be possible to debug this branch by installing OpenIndiana + # (use ZFS) and going unpriv -> unpriv. + res = self._remote_chmod(remote_paths, posix_acl_mode) + if res['rc'] == 0: + return remote_paths + + # we'll need this down here + become_link = get_versioned_doclink('user_guide/become.html') + + # Step 3f: Common group + # Otherwise, we're a normal user. We failed to chown the paths to the + # unprivileged user, but if we have a common group with them, we should + # be able to chown it to that. + # + # Note that we have no way of knowing if this will actually work... just + # because chgrp exits successfully does not mean that Ansible will work. + # We could check if the become user is in the group, but this would + # create an extra round trip. + # + # Also note that due to the above, this can prevent the + # world_readable_temp logic below from ever getting called. We + # leave this up to the user to rectify if they have both of these + # features enabled. + group = self.get_shell_option('common_remote_group') + if group is not None: + res = self._remote_chgrp(remote_paths, group) + if res['rc'] == 0: + # warn user that something might go weirdly here. + if self.get_shell_option('world_readable_temp'): + display.warning( + 'Both common_remote_group and ' + 'allow_world_readable_tmpfiles are set. chgrp was ' + 'successful, but there is no guarantee that Ansible ' + 'will be able to read the files after this operation, ' + 'particularly if common_remote_group was set to a ' + 'group of which the unprivileged become user is not a ' + 'member. 
In this situation, ' + 'allow_world_readable_tmpfiles is a no-op. See this ' + 'URL for more details: %s' + '#risks-of-becoming-an-unprivileged-user' % become_link) + if execute: + group_mode = 'g+rwx' + else: + group_mode = 'g+rw' + res = self._remote_chmod(remote_paths, group_mode) + if res['rc'] == 0: + return remote_paths + + # Step 4: World-readable temp directory + if self.get_shell_option('world_readable_temp'): + # chown and fs acls failed -- do things this insecure way only if + # the user opted in in the config file + display.warning( + 'Using world-readable permissions for temporary files Ansible ' + 'needs to create when becoming an unprivileged user. This may ' + 'be insecure. For information on securing this, see %s' + '#risks-of-becoming-an-unprivileged-user' % become_link) + res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode) + if res['rc'] == 0: + return remote_paths + raise AnsibleError( + 'Failed to set file mode on remote files ' + '(rc: {0}, err: {1})'.format( + res['rc'], + to_native(res['stderr']))) + + raise AnsibleError( + 'Failed to set permissions on the temporary files Ansible needs ' + 'to create when becoming an unprivileged user ' + '(rc: %s, err: %s}). 
For information on working around this, see %s'
            '#risks-of-becoming-an-unprivileged-user' % (
                res['rc'],
                to_native(res['stderr']), become_link))

    # NOTE: the next four methods are thin wrappers: each asks the connection's
    # shell plugin to build the appropriate command string, then runs it on the
    # remote host via _low_level_execute_command() and returns the raw result
    # dict (rc/stdout/stderr) untouched.
    def _remote_chmod(self, paths, mode, sudoable=False):
        '''
        Issue a remote chmod command
        '''
        cmd = self._connection._shell.chmod(paths, mode)
        res = self._low_level_execute_command(cmd, sudoable=sudoable)
        return res

    def _remote_chown(self, paths, user, sudoable=False):
        '''
        Issue a remote chown command
        '''
        cmd = self._connection._shell.chown(paths, user)
        res = self._low_level_execute_command(cmd, sudoable=sudoable)
        return res

    def _remote_chgrp(self, paths, group, sudoable=False):
        '''
        Issue a remote chgrp command
        '''
        cmd = self._connection._shell.chgrp(paths, group)
        res = self._low_level_execute_command(cmd, sudoable=sudoable)
        return res

    def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
        '''
        Issue a remote call to setfacl
        '''
        cmd = self._connection._shell.set_user_facl(paths, user, mode)
        res = self._low_level_execute_command(cmd, sudoable=sudoable)
        return res

    def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
        '''
        Get information from remote file.

        Runs the 'ansible.legacy.stat' module on the target and returns its
        'stat' sub-dict. Raises AnsibleError when the module reports failure.
        A missing file gets checksum '1' (can never match a real sha1); a
        path with no checksum (e.g. a directory) gets ''.
        '''
        if tmp is not None:
            display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
                            ' plugins should set self._connection._shell.tmpdir to share'
                            ' the tmpdir')
        del tmp  # No longer used

        module_args = dict(
            path=path,
            follow=follow,
            get_checksum=checksum,
            checksum_algorithm='sha1',
        )
        mystat = self._execute_module(module_name='ansible.legacy.stat', module_args=module_args, task_vars=all_vars,
                                      wrap_async=False)

        if mystat.get('failed'):
            # prefer stderr, then stdout, then the module's own msg for the error text
            msg = mystat.get('module_stderr')
            if not msg:
                msg = mystat.get('module_stdout')
            if not msg:
                msg = mystat.get('msg')
            raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))

        if not mystat['stat']['exists']:
            # empty might be matched, 1 should never match, also backwards compatible
            mystat['stat']['checksum'] = '1'

        # happens sometimes when it is a dir and not on bsd
        if 'checksum' not in mystat['stat']:
            mystat['stat']['checksum'] = ''
        elif not isinstance(mystat['stat']['checksum'], string_types):
            raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))

        return mystat['stat']

    def _remote_checksum(self, path, all_vars, follow=False):
        """Deprecated. Use _execute_remote_stat() instead.

        Produces a remote checksum given a path,
        Returns a number 0-4 for specific errors instead of checksum, also ensures it is different
        0 = unknown error
        1 = file does not exist, this might not be an error
        2 = permissions issue
        3 = its a directory, not a file
        4 = stat module failed, likely due to not finding python
        5 = appropriate json module not found
        """
        self._display.deprecated("The '_remote_checksum()' method is deprecated. "
                                 "The plugin author should update the code to use '_execute_remote_stat()' instead", "2.16")
        x = "0"  # unknown error has occurred
        try:
            remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
            if remote_stat['exists'] and remote_stat['isdir']:
                x = "3"  # its a directory not a file
            else:
                x = remote_stat['checksum']  # if 1, file is missing
        except AnsibleError as e:
            # map known error-message suffixes onto the numeric codes above
            errormsg = to_text(e)
            if errormsg.endswith(u'Permission denied'):
                x = "2"  # cannot read file
            elif errormsg.endswith(u'MODULE FAILURE'):
                x = "4"  # python not found or module uncaught exception
            elif 'json' in errormsg:
                x = "5"  # json module needed
        finally:
            # deliberate: the return inside finally swallows any other exception
            return x  # pylint: disable=lost-exception

    def _remote_expand_user(self, path, sudoable=True, pathsep=None):
        ''' takes a remote path and performs tilde/$HOME expansion on the remote host '''
        # NOTE(review): 'pathsep' is accepted but never used in this body —
        # presumably kept for interface compatibility; confirm against callers.

        # We only expand ~/path and ~username/path
        if not path.startswith('~'):
            return path

        # Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
        # dir there.
        split_path = path.split(os.path.sep, 1)
        expand_path = split_path[0]

        if expand_path == '~':
            # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
            # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
            # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
            become_user = self.get_become_option('become_user')
            if getattr(self._connection, '_remote_is_local', False):
                pass
            elif sudoable and self._connection.become and become_user:
                expand_path = '~%s' % become_user
            else:
                # use remote user instead, if none set default to current user
                expand_path = '~%s' % (self._get_remote_user() or '')

        # use shell to construct appropriate command and execute
        cmd = self._connection._shell.expand_user(expand_path)
        data = self._low_level_execute_command(cmd, sudoable=False)

        try:
            # last stdout line is the expansion; earlier lines may be login noise
            initial_fragment = data['stdout'].strip().splitlines()[-1]
        except IndexError:
            initial_fragment = None

        if not initial_fragment:
            # Something went wrong trying to expand the path remotely. Try using pwd, if not, return
            # the original string
            cmd = self._connection._shell.pwd()
            pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
            if pwd:
                expanded = pwd
            else:
                expanded = path

        elif len(split_path) > 1:
            expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
        else:
            expanded = initial_fragment

        # reject a remote answer that escapes upward via '..'
        if '..' in os.path.dirname(expanded).split('/'):
            raise AnsibleError("'%s' returned an invalid relative home directory path containing '..'" % self._get_remote_addr({}))

        return expanded

    def _strip_success_message(self, data):
        '''
        Removes the BECOME-SUCCESS message from the data.
        '''
        if data.strip().startswith('BECOME-SUCCESS-'):
            data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
        return data

    def _update_module_args(self, module_name, module_args, task_vars):
        '''
        Inject the internal '_ansible_*' control parameters (check mode,
        no_log, verbosity, version, tmpdir, etc.) into module_args in place.
        '''

        # set check mode in the module arguments, if required
        if self._task.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # set no log in the module arguments, if required
        no_target_syslog = C.config.get_config_value('DEFAULT_NO_TARGET_SYSLOG', variables=task_vars)
        module_args['_ansible_no_log'] = self._task.no_log or no_target_syslog

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._task.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # give the module information about its name
        module_args['_ansible_module_name'] = module_name

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        # what to do when parameter values are converted to strings
        module_args['_ansible_string_conversion_action'] = C.STRING_CONVERSION_ACTION

        # give the module the socket for persistent connections
        module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
        if not module_args['_ansible_socket']:
            module_args['_ansible_socket'] = task_vars.get('ansible_socket')

        # make sure all commands use the designated shell executable
        module_args['_ansible_shell_executable'] = self._play_context.executable

        # make sure modules are aware if they need to keep the remote files
        module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES

        # make sure all commands use the designated temporary directory if created
        if self._is_become_unprivileged():  # force fallback on remote_tmp as user cannot normally write to dir
            module_args['_ansible_tmpdir'] = None
        else:
            module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir

        # make sure the remote_tmp value is sent through in case modules needs to create their own
        module_args['_ansible_remote_tmp'] = self.get_shell_option('remote_tmp', default='~/.ansible/tmp')

    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False):
        '''
        Transfer and run a module along with its arguments.
        '''
        # 'tmp' and 'delete_remote_tmp' are dead parameters retained for
        # backward compatibility; passing either only produces a warning.
        if tmp is not None:
            display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
                            ' should set self._connection._shell.tmpdir to share the tmpdir')
        del tmp  # No longer used
        if delete_remote_tmp is not None:
            display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
                            ' Action plugins should check self._connection._shell.tmpdir to'
                            ' see if a tmpdir existed before they were called to determine'
                            ' if they are responsible for removing it.')
        del delete_remote_tmp  # No longer used

        tmpdir = self._connection._shell.tmpdir

        # We set the module_style to new here so the remote_tmp is created
        # before the module args are built if remote_tmp is needed (async).
        # If the module_style turns out to not be new and we didn't create the
        # remote tmp here, it will still be created. 
This must be done before
        # calling self._update_module_args() so the module wrapper has the
        # correct remote_tmp value set
        if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
            self._make_tmp_path()
            tmpdir = self._connection._shell.tmpdir

        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        self._update_module_args(module_name, module_args, task_vars)

        # remember where we appended the temporary async env entry so it can
        # be removed again after the module is built
        remove_async_dir = None
        if wrap_async or self._task.async_val:
            async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
            remove_async_dir = len(self._task.environment)
            self._task.environment.append({"ANSIBLE_ASYNC_DIR": async_dir})

        # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
        (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        self._used_interpreter = shebang
        remote_module_path = None

        if not self._is_pipelining_enabled(module_style, wrap_async):
            # we might need remote tmp dir
            if tmpdir is None:
                self._make_tmp_path()
                tmpdir = self._connection._shell.tmpdir

            remote_module_filename = self._connection._shell.get_remote_filename(module_path)
            remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)

        args_file_path = None
        if module_style in ('old', 'non_native_want_json', 'binary'):
            # we'll also need a tmp file to hold our module arguments
            args_file_path = self._connection._shell.join_path(tmpdir, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" % remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in module_args.items():
                    args_data += '%s=%s ' % (k, shlex.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        # remove the ANSIBLE_ASYNC_DIR env entry if we added a temporary one for
        # the async_wrapper task.
        if remove_async_dir is not None:
            del self._task.environment[remove_async_dir]

        # everything uploaded so far; perms get fixed up in one pass below
        remote_files = []
        if tmpdir and remote_module_path:
            remote_files = [tmpdir, remote_module_path]

        if args_file_path:
            remote_files.append(args_file_path)

        sudoable = True
        in_data = None
        cmd = ""

        if wrap_async and not self._connection.always_pipeline_modules:
            # configure, upload, and chmod the async_wrapper module
            (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(
                module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
            async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
            remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
            self._transfer_data(remote_async_module_path, async_module_data)
            remote_files.append(remote_async_module_path)

            async_limit = self._task.async_val
            async_jid = str(random.randint(0, 999999999999))

            # call the interpreter for async_wrapper directly
            # this permits use of a script for an interpreter on non-Linux platforms
            interpreter = shebang.replace('#!', '').strip()
            async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]

            if environment_string:
                async_cmd.insert(0, environment_string)

            if args_file_path:
                async_cmd.append(args_file_path)
            else:
                # maintain a fixed number of positional parameters for async_wrapper
                async_cmd.append('_')

            if not self._should_remove_tmp_path(tmpdir):
                async_cmd.append("-preserve_tmp")

            cmd = " ".join(to_text(x) for x in async_cmd)

        else:

            if self._is_pipelining_enabled(module_style):
                in_data = module_data
                display.vvv("Pipelining is enabled.")
            else:
                cmd = remote_module_path

            cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()

        # Fix permissions of the tmpdir path and tmpdir files. This should be called after all
        # files have been transferred.
        if remote_files:
            # remove none/empty
            remote_files = [x for x in remote_files if x]
            self._fixup_perms2(remote_files, self._get_remote_user())

        # actually execute
        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        # parse the main result
        data = self._parse_returned_data(res)

        # NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
        # get internal info before cleaning
        if data.pop("_ansible_suppress_tmpdir_delete", False):
            self._cleanup_remote_tmp = False

        # NOTE: yum returns results .. but that made it 'compatible' with squashing, so we allow mappings, for now
        if 'results' in data and (not isinstance(data['results'], Sequence) or isinstance(data['results'], string_types)):
            data['ansible_module_results'] = data['results']
            del data['results']
            display.warning("Found internal 'results' key in module return, renamed to 'ansible_module_results'.")

        # remove internal keys
        remove_internal_keys(data)

        if wrap_async:
            # async_wrapper will clean up its tmpdir on its own so we want the controller side to
            # forget about it now
            self._connection._shell.tmpdir = None

            # FIXME: for backwards compat, figure out if still makes sense
            data['changed'] = True

        # pre-split stdout/stderr into lines if needed
        if 'stdout' in data and 'stdout_lines' not in data:
            # if the value is 'False', a default won't catch it.
            txt = data.get('stdout', None) or u''
            data['stdout_lines'] = txt.splitlines()
        if 'stderr' in data and 'stderr_lines' not in data:
            # if the value is 'False', a default won't catch it.
            txt = data.get('stderr', None) or u''
            data['stderr_lines'] = txt.splitlines()

        # propagate interpreter discovery results back to the controller
        if self._discovered_interpreter_key:
            if data.get('ansible_facts') is None:
                data['ansible_facts'] = {}

            data['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter

        if self._discovery_warnings:
            if data.get('warnings') is None:
                data['warnings'] = []
            data['warnings'].extend(self._discovery_warnings)

        if self._discovery_deprecation_warnings:
            if data.get('deprecations') is None:
                data['deprecations'] = []
            data['deprecations'].extend(self._discovery_deprecation_warnings)

        # mark the entire module results untrusted as a template right here, since the current action could
        # possibly template one of these values.
        data = wrap_var(data)

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data

    def _parse_returned_data(self, res):
        '''
        Parse a module's stdout as JSON; on failure, build a 'failed' result
        that preserves stdout/stderr and tries to diagnose a missing interpreter.
        '''
        try:
            filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''), objects_only=True)
            for w in warnings:
                display.warning(w)

            data = json.loads(filtered_output)
            data['_ansible_parsed'] = True
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, _ansible_parsed=False)
            data['module_stdout'] = res.get('stdout', u'')
            if 'stderr' in res:
                data['module_stderr'] = res['stderr']
                if res['stderr'].startswith(u'Traceback'):
                    data['exception'] = res['stderr']

            # in some cases a traceback will arrive on stdout instead of stderr, such as when using ssh with -tt
            if 'exception' not in data and data['module_stdout'].startswith(u'Traceback'):
                data['exception'] = data['module_stdout']

            # The default
            data['msg'] = "MODULE FAILURE"

            # try to figure out if we are missing interpreter
            if self._used_interpreter is not None:
                interpreter = re.escape(self._used_interpreter.lstrip('!#'))
                match = re.compile('%s: (?:No such file or directory|not found)' % interpreter)
                if match.search(data['module_stderr']) or match.search(data['module_stdout']):
                    data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."

            # always append hint
            data['msg'] += '\nSee stdout/stderr for the exact error'

            if 'rc' in res:
                data['rc'] = res['rc']
        return data

    # FIXME: move to connection base
    def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
        '''
        This is the function which executes the low level shell command, which
        may be commands to create/remove directories for temporary files, or to
        run the module code or python directly when pipelining.

        :kwarg encoding_errors: If the value returned by the command isn't
            utf-8 then we have to figure out how to transform it to unicode.
            If the value is just going to be displayed to the user (or
            discarded) then the default of 'replace' is fine.  If the data is
            used as a key or is going to be written back out to a file
            verbatim, then this won't work.  May have to use some sort of
            replacement strategy (python3 could use surrogateescape)
        :kwarg chdir: cd into this directory before executing the command.
        '''

        display.debug("_low_level_execute_command(): starting")
        # if not cmd:
        #     # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
        #     display.debug("_low_level_execute_command(): no command, exiting")
        #     return dict(stdout='', stderr='', rc=254)

        # prepend the cd; must happen before become wrapping so the wrapped
        # command runs in the requested directory
        if chdir:
            display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
            cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)

        # https://github.com/ansible/ansible/issues/68054
        if executable:
            self._connection._shell.executable = executable

        ruser = self._get_remote_user()
        buser = self.get_become_option('become_user')
        if (sudoable and self._connection.become and  # if sudoable and have become
                resource_from_fqcr(self._connection.transport) != 'network_cli' and  # if not using network_cli
                (C.BECOME_ALLOW_SAME_USER or (buser != ruser or not any((ruser, buser))))):  # if we allow same user PE or users are different and either is set
            display.debug("_low_level_execute_command(): using become for this command")
            cmd = self._connection.become.build_become_command(cmd, self._connection._shell)

        if self._connection.allow_executable:
            if executable is None:
                executable = self._play_context.executable
                # mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
                # only applied for the default executable to avoid interfering with the raw action
                cmd = self._connection._shell.append_command(cmd, 'sleep 0')
            if executable:
                cmd = executable + ' -c ' + shlex.quote(cmd)

        display.debug("_low_level_execute_command(): executing: %s" % (cmd,))

        # Change directory to basedir of task for command execution when connection is local
        if self._connection.transport == 'local':
            self._connection.cwd = to_bytes(self._loader.get_basedir(), errors='surrogate_or_strict')

        rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # stdout and stderr may be either a file-like or a bytes object.
        # Convert either one to a text type
        if isinstance(stdout, binary_type):
            out = to_text(stdout, errors=encoding_errors)
        elif not isinstance(stdout, text_type):
            out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
        else:
            out = stdout

        if isinstance(stderr, binary_type):
            err = to_text(stderr, errors=encoding_errors)
        elif not isinstance(stderr, text_type):
            err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
        else:
            err = stderr

        if rc is None:
            rc = 0

        # be sure to remove the BECOME-SUCCESS message now
        out = self._strip_success_message(out)

        display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
        return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())

    def _get_diff_data(self, destination, source, task_vars, source_file=True):

        # Note: Since we do not diff the source and destination before we transform from bytes into
        # text the diff between source and destination may not be accurate.  To fix this, we'd need
        # to move the diffing from the callback plugins into here.
        #
        # Example of data which would cause trouble is src_content == b'\xff' and dest_content ==
        # b'\xfe'. 
Neither of those are valid utf-8 so both get turned into the replacement
        # character: diff['before'] = u'�' ; diff['after'] = u'�'  When the callback plugin later
        # diffs before and after it shows an empty diff.

        diff = {}
        display.debug("Going to peek to see if file has changed permissions")
        peek_result = self._execute_module(
            module_name='ansible.legacy.file', module_args=dict(path=destination, _diff_peek=True),
            task_vars=task_vars, persist_files=True)

        if peek_result.get('failed', False):
            display.warning(u"Failed to get diff between '%s' and '%s': %s" % (os.path.basename(source), destination, to_text(peek_result.get(u'msg', u''))))
            return diff

        if peek_result.get('rc', 0) == 0:

            if peek_result.get('state') in (None, 'absent'):
                diff['before'] = u''
            elif peek_result.get('appears_binary'):
                diff['dst_binary'] = 1
            elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
                diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
            else:
                # fetch the current destination contents via slurp for the 'before' side
                display.debug(u"Slurping the file %s" % source)
                dest_result = self._execute_module(
                    module_name='ansible.legacy.slurp', module_args=dict(path=destination),
                    task_vars=task_vars, persist_files=True)
                if 'content' in dest_result:
                    dest_contents = dest_result['content']
                    if dest_result['encoding'] == u'base64':
                        dest_contents = base64.b64decode(dest_contents)
                    else:
                        raise AnsibleError("unknown encoding in content option, failed: %s" % to_native(dest_result))
                    diff['before_header'] = destination
                    diff['before'] = to_text(dest_contents)

        if source_file:
            # 'after' side comes from a local file on the controller
            st = os.stat(source)
            if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
                diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
            else:
                display.debug("Reading local copy of the file %s" % source)
                try:
                    with open(source, 'rb') as src:
                        src_contents = src.read()
                except Exception as e:
                    raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, to_native(e)))

                if b"\x00" in src_contents:
                    diff['src_binary'] = 1
                else:
                    diff['after_header'] = source
                    diff['after'] = to_text(src_contents)
        else:
            # 'source' is already the generated content, not a path
            display.debug(u"source of file passed in")
            diff['after_header'] = u'dynamically generated'
            diff['after'] = source

        if self._task.no_log:
            if 'before' in diff:
                diff["before"] = u""
            if 'after' in diff:
                diff["after"] = u" [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"

        return diff

    def _find_needle(self, dirname, needle):
        '''
        find a needle in haystack of paths, optionally using 'dirname' as a subdir.
        This will build the ordered list of paths to search and pass them to dwim
        to get back the first existing file found.
        '''

        # dwim already deals with playbook basedirs
        path_stack = self._task.get_search_path()

        # if missing it will return a file not found exception
        return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
new file mode 100644
index 0000000..e569739
--- /dev/null
+++ b/lib/ansible/plugins/action/add_host.py
@@ -0,0 +1,98 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from collections.abc import Mapping

from ansible.errors import AnsibleActionFail
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
from ansible.parsing.utils.addresses import parse_address
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars

display = Display()


class ActionModule(ActionBase):
    ''' Create inventory hosts and groups in the memory inventory'''

    # We need to be able to modify the inventory
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        '''
        Validate the add_host arguments and return an 'add_host' result dict
        (host name, group list, host vars) for the strategy to apply to the
        in-memory inventory. Nothing is executed on any remote host.
        '''

        self._supports_check_mode = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        args = self._task.args
        raw = args.pop('_raw_params', {})
        if isinstance(raw, Mapping):
            # TODO: create 'conflict' detection in base class to deal with repeats and aliases and warn user
            args = combine_vars(raw, args)
        else:
            # FIX: corrected 'dictonary' -> 'dictionary' in the user-facing error message
            raise AnsibleActionFail('Invalid raw parameters passed, requires a dictionary/mapping got a %s' % type(raw))

        # Parse out any hostname:port patterns
        new_name = args.get('name', args.get('hostname', args.get('host', None)))
        if new_name is None:
            raise AnsibleActionFail('name, host or hostname needs to be provided')

        display.vv("creating host via 'add_host': hostname=%s" % new_name)

        try:
            name, port = parse_address(new_name, allow_ranges=False)
        except Exception:
            # not a parsable hostname, but might still be usable
            name = new_name
            port = None

        if port:
            args['ansible_ssh_port'] = port

        # 'groupname', 'groups' and 'group' are aliases; first one present wins
        groups = args.get('groupname', args.get('groups', args.get('group', '')))
        # add it to the group if that was specified
        new_groups = []
        if groups:
            if isinstance(groups, list):
                group_list = groups
            elif isinstance(groups, string_types):
                group_list = groups.split(",")
+ else: + raise AnsibleActionFail("Groups must be specified as a list.", obj=self._task) + + for group_name in group_list: + if group_name not in new_groups: + new_groups.append(group_name.strip()) + + # Add any variables to the new_host + host_vars = dict() + special_args = frozenset(('name', 'hostname', 'groupname', 'groups')) + for k in args.keys(): + if k not in special_args: + host_vars[k] = args[k] + + result['changed'] = False + result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars) + return result diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py new file mode 100644 index 0000000..06fa2df --- /dev/null +++ b/lib/ansible/plugins/action/assemble.py @@ -0,0 +1,166 @@ +# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com> +# Stephen Fromm <sfromm@gmail.com> +# Brian Coca <briancoca+dev@gmail.com> +# Toshio Kuratomi <tkuratomi@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
#
# You should have received a copy of the GNU General Public License
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import codecs
import os
import os.path
import re
import tempfile

from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s


class ActionModule(ActionBase):

    TRANSFERS_FILES = True

    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
        ''' assemble a file from a directory of fragments '''
        # Fragments are concatenated in sorted() filename order into a local
        # temp file; the caller is responsible for removing temp_path.

        tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
        tmp = os.fdopen(tmpfd, 'wb')
        delimit_me = False   # becomes True once the first fragment is written
        add_newline = False  # True when the previous fragment lacked a trailing newline

        for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
            if compiled_regexp and not compiled_regexp.search(f):
                continue
            fragment = u"%s/%s" % (src_path, f)
            if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
                continue

            # get_real_file handles vault decryption when decrypt=True
            with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
                fragment_content = fragment_fh.read()

            # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline: + tmp.write(b'\n') + + # delimiters should only appear between fragments + if delimit_me: + if delimiter: + # un-escape anything like newlines + delimiter = codecs.escape_decode(delimiter)[0] + tmp.write(delimiter) + # always make sure there's a newline after the + # delimiter, so lines don't run together + if delimiter[-1] != b'\n': + tmp.write(b'\n') + + tmp.write(fragment_content) + delimit_me = True + if fragment_content.endswith(b'\n'): + add_newline = False + else: + add_newline = True + + tmp.close() + return temp_path + + def run(self, tmp=None, task_vars=None): + + self._supports_check_mode = False + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + if task_vars is None: + task_vars = dict() + + src = self._task.args.get('src', None) + dest = self._task.args.get('dest', None) + delimiter = self._task.args.get('delimiter', None) + remote_src = self._task.args.get('remote_src', 'yes') + regexp = self._task.args.get('regexp', None) + follow = self._task.args.get('follow', False) + ignore_hidden = self._task.args.get('ignore_hidden', False) + decrypt = self._task.args.pop('decrypt', True) + + try: + if src is None or dest is None: + raise AnsibleActionFail("src and dest are required") + + if boolean(remote_src, strict=False): + # call assemble via ansible.legacy to allow library/ overrides of the module without collection search + result.update(self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars)) + raise _AnsibleActionDone() + else: + try: + src = self._find_needle('files', src) + except AnsibleError as e: + raise AnsibleActionFail(to_native(e)) + + if not os.path.isdir(src): + raise AnsibleActionFail(u"Source (%s) is not a directory" % src) + + _re = None + if regexp is not None: + _re = re.compile(regexp) + + # Does all work assembling the file + path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt) + + path_checksum = 
checksum_s(path) + dest = self._remote_expand_user(dest) + dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow) + + diff = {} + + # setup args for running modules + new_module_args = self._task.args.copy() + + # clean assemble specific options + for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']: + if opt in new_module_args: + del new_module_args[opt] + new_module_args['dest'] = dest + + if path_checksum != dest_stat['checksum']: + + if self._play_context.diff: + diff = self._get_diff_data(dest, path, task_vars) + + remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src') + xfered = self._transfer_file(path, remote_path) + + # fix file permissions when the copy is done as a different user + self._fixup_perms2((self._connection._shell.tmpdir, remote_path)) + + new_module_args.update(dict(src=xfered,)) + + res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars) + if diff: + res['diff'] = diff + result.update(res) + else: + result.update(self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)) + + except AnsibleAction as e: + result.update(e.result) + finally: + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result diff --git a/lib/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py new file mode 100644 index 0000000..7721a6b --- /dev/null +++ b/lib/ansible/plugins/action/assert.py @@ -0,0 +1,94 @@ +# Copyright 2012, Dag Wieers <dag@wieers.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean


class ActionModule(ActionBase):
    ''' Assert that given expressions evaluate to true; fail with a custom message otherwise '''

    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('fail_msg', 'msg', 'quiet', 'success_msg', 'that'))

    def run(self, tmp=None, task_vars=None):
        # Evaluates each conditional in 'that' and fails on the first one
        # that does not hold. Runs entirely on the controller (no file
        # transfer, no module execution).
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if 'that' not in self._task.args:
            raise AnsibleError('conditional required in "that" string')

        fail_msg = None
        success_msg = None

        # 'fail_msg' wins over the legacy alias 'msg'; either may be a
        # string or a list of strings.
        fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
        if fail_msg is None:
            fail_msg = 'Assertion failed'
        elif isinstance(fail_msg, list):
            if not all(isinstance(x, string_types) for x in fail_msg):
                raise AnsibleError('Type of one of the elements in fail_msg or msg list is not string type')
        elif not isinstance(fail_msg, (string_types, list)):
            raise AnsibleError('Incorrect type for fail_msg or msg, expected a string or list and got %s' % type(fail_msg))

        success_msg = self._task.args.get('success_msg')
        if success_msg is None:
            success_msg = 'All assertions passed'
        elif isinstance(success_msg, list):
            if not all(isinstance(x, string_types) for x in success_msg):
                raise AnsibleError('Type of one of the elements in success_msg list is not string type')
        elif not isinstance(success_msg, (string_types, list)):
            raise AnsibleError('Incorrect type for success_msg, expected a string or list and got %s' % type(success_msg))

        # quiet=True suppresses the verbose per-assertion output
        quiet = boolean(self._task.args.get('quiet', False), strict=False)

        # make sure the 'that' items are a list
        thats = self._task.args['that']
        if not isinstance(thats, list):
            thats = [thats]

        # Now we iterate over the that items, temporarily assigning them
        # to the task's when value so we can evaluate the conditional using
        # the built in evaluate function. The when has already been evaluated
        # by this point, and is not used again, so we don't care about mangling
        # that value now
        cond = Conditional(loader=self._loader)
        if not quiet:
            result['_ansible_verbose_always'] = True

        for that in thats:
            cond.when = [that]
            test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
            if not test_result:
                # short-circuit: report the first failing assertion only
                result['failed'] = True
                result['evaluated_to'] = test_result
                result['assertion'] = that

                result['msg'] = fail_msg

                return result

        result['changed'] = False
        result['msg'] = success_msg
        return result
    def run(self, tmp=None, task_vars=None):
        """Check on (or clean up) a previously started asynchronous job.

        Validates 'jid'/'mode' args, seeds default result fields, then
        delegates the actual status/cleanup work to the
        ansible.legacy.async_status module on the target.
        """

        results = super(ActionModule, self).run(tmp, task_vars)

        validation_result, new_module_args = self.validate_argument_spec(
            argument_spec={
                'jid': {'type': 'str', 'required': True},
                'mode': {'type': 'str', 'choices': ['status', 'cleanup'], 'default': 'status'},
            },
        )

        # initialize response; the module result merged below may overwrite
        # these defaults
        results['started'] = results['finished'] = 0
        results['stdout'] = results['stderr'] = ''
        results['stdout_lines'] = results['stderr_lines'] = []

        jid = new_module_args["jid"]
        mode = new_module_args["mode"]

        results['ansible_job_id'] = jid
        async_dir = self._get_async_dir()
        # job status file lives at <async_dir>/<jid> on the target
        log_path = self._connection._shell.join_path(async_dir, jid)

        if mode == 'cleanup':
            results['erased'] = log_path
        else:
            results['results_file'] = log_path
            results['started'] = 1

        # tell the module where the async job files are kept
        new_module_args['_async_dir'] = async_dir
        results = merge_hash(results, self._execute_module(module_name='ansible.legacy.async_status', task_vars=task_vars, module_args=new_module_args))

        return results
allow library/ override of `command` while not allowing + # collections search for an unqualified `command` module + results = merge_hash(results, self._execute_module(module_name='ansible.legacy.command', task_vars=task_vars, wrap_async=wrap_async)) + + if not wrap_async: + # remove a temporary path we created + self._remove_tmp_path(self._connection._shell.tmpdir) + + return results diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py new file mode 100644 index 0000000..cb3d15b --- /dev/null +++ b/lib/ansible/plugins/action/copy.py @@ -0,0 +1,599 @@ +# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# (c) 2017 Toshio Kuratomi <tkuraotmi@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import os.path +import stat +import tempfile +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.utils.hashing import checksum + + +# Supplement the FILE_COMMON_ARGUMENTS with arguments that are specific to file +REAL_FILE_ARGS = frozenset(FILE_COMMON_ARGUMENTS.keys()).union( + ('state', 'path', '_original_basename', 'recurse', 'force', + '_diff_peek', 'src')) + + +def _create_remote_file_args(module_args): + """remove keys that are not relevant to file""" + return dict((k, v) for k, v in module_args.items() if k in REAL_FILE_ARGS) + + +def _create_remote_copy_args(module_args): + """remove action plugin only keys""" + return dict((k, v) for k, v in module_args.items() if k not in ('content', 'decrypt')) + + +def _walk_dirs(topdir, base_path=None, local_follow=False, trailing_slash_detector=None): + """ + Walk a filesystem tree returning enough information to copy the files + + :arg topdir: The directory that the filesystem tree is rooted at + :kwarg base_path: The initial directory structure to strip off of the + files for the destination directory. If this is None (the default), + the base_path is set to ``top_dir``. + :kwarg local_follow: Whether to follow symlinks on the source. When set + to False, no symlinks are dereferenced. When set to True (the + default), the code will dereference most symlinks. However, symlinks + can still be present if needed to break a circular link. + :kwarg trailing_slash_detector: Function to determine if a path has + a trailing directory separator. 
    # Convert the path segments into byte strings

    r_files = {'files': [], 'directories': [], 'symlinks': []}

    def _recurse(topdir, rel_offset, parent_dirs, rel_base=u''):
        """
        This is a closure (function utilizing variables from it's parent
        function's scope) so that we only need one copy of all the containers.
        Note that this function uses side effects (See the Variables used from
        outer scope).

        :arg topdir: The directory we are walking for files
        :arg rel_offset: Integer defining how many characters to strip off of
            the beginning of a path
        :arg parent_dirs: Directories that we're copying that this directory is in.
        :kwarg rel_base: String to prepend to the path after ``rel_offset`` is
            applied to form the relative path.

        Variables used from the outer scope
        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        :r_files: Dictionary of files in the hierarchy. See the return value
            for :func:`walk` for the structure of this dictionary.
        :local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
        """
        for base_path, sub_folders, files in os.walk(topdir):
            for filename in files:
                filepath = os.path.join(base_path, filename)
                # strip rel_offset chars so the destination path is relative
                dest_filepath = os.path.join(rel_base, filepath[rel_offset:])

                if os.path.islink(filepath):
                    # Dereference the symlink
                    real_file = os.path.realpath(filepath)
                    if local_follow and os.path.isfile(real_file):
                        # Add the file pointed to by the symlink
                        r_files['files'].append((real_file, dest_filepath))
                    else:
                        # Mark this file as a symlink to copy
                        r_files['symlinks'].append((os.readlink(filepath), dest_filepath))
                else:
                    # Just a normal file
                    r_files['files'].append((filepath, dest_filepath))

            for dirname in sub_folders:
                dirpath = os.path.join(base_path, dirname)
                dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
                real_dir = os.path.realpath(dirpath)
                dir_stats = os.stat(real_dir)

                if os.path.islink(dirpath):
                    if local_follow:
                        # (st_dev, st_ino) pairs identify already-visited
                        # directories so circular symlinks don't recurse forever
                        if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
                            # Just insert the symlink if the target directory
                            # exists inside of the copy already
                            r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
                        else:
                            # Walk the dirpath to find all parent directories.
                            new_parents = set()
                            parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
                            for parent in range(len(parent_dir_list), 0, -1):
                                parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
                                if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
                                    # Reached the point at which the directory
                                    # tree is already known. Don't add any
                                    # more or we might go to an ancestor that
                                    # isn't being copied.
                                    break
                                new_parents.add((parent_stat.st_dev, parent_stat.st_ino))

                            if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
                                # This was a circular symlink. So add it as
                                # a symlink
                                r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
                            else:
                                # Walk the directory pointed to by the symlink
                                r_files['directories'].append((real_dir, dest_dirpath))
                                offset = len(real_dir) + 1
                                _recurse(real_dir, offset, parent_dirs.union(new_parents), rel_base=dest_dirpath)
                    else:
                        # Add the symlink to the destination
                        r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
                else:
                    # Just a normal directory
                    r_files['directories'].append((dirpath, dest_dirpath))

    # Check if the source ends with a "/" so that we know which directory
    # level to work at (similar to rsync)
    source_trailing_slash = False
    if trailing_slash_detector:
        source_trailing_slash = trailing_slash_detector(topdir)
    else:
        source_trailing_slash = topdir.endswith(os.path.sep)

    # Calculate the offset needed to strip the base_path to make relative
    # paths
    if base_path is None:
        base_path = topdir
        if not source_trailing_slash:
            base_path = os.path.dirname(base_path)
    if topdir.startswith(base_path):
        offset = len(base_path)

        # Make sure we're making the new paths relative
        if trailing_slash_detector and not trailing_slash_detector(base_path):
            offset += 1
        elif not base_path.endswith(os.path.sep):
            offset += 1

    if os.path.islink(topdir) and not local_follow:
        # NOTE(review): this assigns a bare tuple rather than a list of
        # tuples, unlike every other r_files['symlinks'] entry — confirm
        # that consumers handle the single-symlink shape.
        r_files['symlinks'] = (os.readlink(topdir), os.path.basename(topdir))
        return r_files

    dir_stats = os.stat(topdir)
    parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
    # Actually walk the directory hierarchy
    _recurse(topdir, offset, parents)

    return r_files
    def _copy_file(self, source_full, source_rel, content, content_tempfile,
                   dest, task_vars, follow):
        """Copy one local file to the remote host.

        Returns a result dict on change/failure, or None when the caller
        should simply continue to the next file (dest exists and force is
        off, or raw transfer already happened).
        """
        decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
        force = boolean(self._task.args.get('force', 'yes'), strict=False)
        raw = boolean(self._task.args.get('raw', 'no'), strict=False)

        result = {}
        result['diff'] = []

        # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
        try:
            source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
        except AnsibleFileNotFound as e:
            result['failed'] = True
            result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
            return result

        # Get the local mode and set if user wanted it preserved
        # https://github.com/ansible/ansible-modules-core/issues/1124
        lmode = None
        if self._task.args.get('mode', None) == 'preserve':
            lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)

        # This is kind of optimization - if user told us destination is
        # dir, do path manipulation right away, otherwise we still check
        # for dest being a dir via remote call below.
        if self._connection._shell.path_has_trailing_slash(dest):
            dest_file = self._connection._shell.join_path(dest, source_rel)
        else:
            dest_file = dest

        # Attempt to get remote file info
        dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)

        if dest_status['exists'] and dest_status['isdir']:
            # The dest is a directory.
            if content is not None:
                # If source was defined as content remove the temporary file and fail out.
                self._remove_tempfile_if_content_defined(content, content_tempfile)
                result['failed'] = True
                result['msg'] = "can not use content with a dir as dest"
                return result
            else:
                # Append the relative source location to the destination and get remote stats again
                dest_file = self._connection._shell.join_path(dest, source_rel)
                dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)

        if dest_status['exists'] and not force:
            # remote_file exists so continue to next iteration.
            return None

        # Generate a hash of the local file.
        local_checksum = checksum(source_full)

        if local_checksum != dest_status['checksum']:
            # The checksums don't match and we will change or error out.

            if self._play_context.diff and not raw:
                result['diff'].append(self._get_diff_data(dest_file, source_full, task_vars))

            if self._play_context.check_mode:
                # check mode: report the would-be change without transferring
                self._remove_tempfile_if_content_defined(content, content_tempfile)
                result['changed'] = True
                return result

            # Define a remote directory that we will copy the file to.
            tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')

            remote_path = None

            if not raw:
                remote_path = self._transfer_file(source_full, tmp_src)
            else:
                # raw: push directly to the final destination, skip the copy module
                self._transfer_file(source_full, dest_file)

            # We have copied the file remotely and no longer require our content_tempfile
            self._remove_tempfile_if_content_defined(content, content_tempfile)
            self._loader.cleanup_tmp_file(source_full)

            # FIXME: I don't think this is needed when PIPELINING=0 because the source is created
            # world readable. Access to the directory itself is controlled via fixup_perms2() as
            # part of executing the module. Check that umask with scp/sftp/piped doesn't cause
            # a problem before acting on this idea. (This idea would save a round-trip)
            # fix file permissions when the copy is done as a different user
            if remote_path:
                self._fixup_perms2((self._connection._shell.tmpdir, remote_path))

            if raw:
                # Continue to next iteration if raw is defined.
                return None

            # Run the copy module

            # src and dest here come after original and override them
            # we pass dest only to make sure it includes trailing slash in case of recursive copy
            new_module_args = _create_remote_copy_args(self._task.args)
            new_module_args.update(
                dict(
                    src=tmp_src,
                    dest=dest,
                    _original_basename=source_rel,
                    follow=follow
                )
            )
            if not self._task.args.get('checksum'):
                new_module_args['checksum'] = local_checksum

            if lmode:
                new_module_args['mode'] = lmode

            module_return = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)

        else:
            # no need to transfer the file, already correct hash, but still need to call
            # the file module in case we want to change attributes
            self._remove_tempfile_if_content_defined(content, content_tempfile)
            self._loader.cleanup_tmp_file(source_full)

            if raw:
                return None

            # Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
            # If checksums match, and follow = True, find out if 'dest' is a link. If so,
            # change it to point to the source of the link.
            if follow:
                dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=False)
                if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow.keys():
                    dest = dest_status_nofollow['lnk_source']

            # Build temporary module_args.
            new_module_args = _create_remote_file_args(self._task.args)
            new_module_args.update(
                dict(
                    dest=dest,
                    _original_basename=source_rel,
                    recurse=False,
                    state='file',
                )
            )
            # src is sent to the file module in _original_basename, not in src
            try:
                del new_module_args['src']
            except KeyError:
                pass

            if lmode:
                new_module_args['mode'] = lmode

            # Execute the file module.
            module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)

        if not module_return.get('checksum'):
            module_return['checksum'] = local_checksum

        result.update(module_return)
        return result

    def _create_content_tempfile(self, content):
        ''' Create a tempfile containing defined content '''
        fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
        f = os.fdopen(fd, 'wb')
        # content may be text; write as bytes
        content = to_bytes(content)
        try:
            f.write(content)
        except Exception as err:
            # don't leave a half-written temp file behind on error
            os.remove(content_tempfile)
            raise Exception(err)
        finally:
            f.close()
        return content_tempfile

    def _remove_tempfile_if_content_defined(self, content, content_tempfile):
        # only remove when 'content' (not 'src') produced the temp file
        if content is not None:
            os.remove(content_tempfile)
boolean(self._task.args.get('remote_src', False), strict=False) + local_follow = boolean(self._task.args.get('local_follow', True), strict=False) + + result['failed'] = True + if not source and content is None: + result['msg'] = 'src (or content) is required' + elif not dest: + result['msg'] = 'dest is required' + elif source and content is not None: + result['msg'] = 'src and content are mutually exclusive' + elif content is not None and dest is not None and dest.endswith("/"): + result['msg'] = "can not use content with a dir as dest" + else: + del result['failed'] + + if result.get('failed'): + return self._ensure_invocation(result) + + # Define content_tempfile in case we set it after finding content populated. + content_tempfile = None + + # If content is defined make a tmp file and write the content into it. + if content is not None: + try: + # If content comes to us as a dict it should be decoded json. + # We need to encode it back into a string to write it out. + if isinstance(content, dict) or isinstance(content, list): + content_tempfile = self._create_content_tempfile(json.dumps(content)) + else: + content_tempfile = self._create_content_tempfile(content) + source = content_tempfile + except Exception as err: + result['failed'] = True + result['msg'] = "could not write content temp file: %s" % to_native(err) + return self._ensure_invocation(result) + + # if we have first_available_file in our vars + # look up the files and use the first one we find as src + elif remote_src: + result.update(self._execute_module(module_name='ansible.legacy.copy', task_vars=task_vars)) + return self._ensure_invocation(result) + else: + # find_needle returns a path that may not have a trailing slash on + # a directory so we need to determine that now (we use it just + # like rsync does to figure out whether to include the directory + # or only the files inside the directory + trailing_slash = source.endswith(os.path.sep) + try: + # find in expected paths + source = 
self._find_needle('files', source) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return self._ensure_invocation(result) + + if trailing_slash != source.endswith(os.path.sep): + if source[-1] == os.path.sep: + source = source[:-1] + else: + source = source + os.path.sep + + # A list of source file tuples (full_path, relative_path) which will try to copy to the destination + source_files = {'files': [], 'directories': [], 'symlinks': []} + + # If source is a directory populate our list else source is a file and translate it to a tuple. + if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')): + # Get a list of the files we want to replicate on the remote side + source_files = _walk_dirs(source, local_follow=local_follow, + trailing_slash_detector=self._connection._shell.path_has_trailing_slash) + + # If it's recursive copy, destination is always a dir, + # explicitly mark it so (note - copy module relies on this). + if not self._connection._shell.path_has_trailing_slash(dest): + dest = self._connection._shell.join_path(dest, '') + # FIXME: Can we optimize cases where there's only one file, no + # symlinks and any number of directories? In the original code, + # empty directories are not copied.... + else: + source_files['files'] = [(source, os.path.basename(source))] + + changed = False + module_return = dict(changed=False) + + # A register for if we executed a module. + # Used to cut down on command calls when not recursive. + module_executed = False + + # expand any user home dir specifier + dest = self._remote_expand_user(dest) + + implicit_directories = set() + for source_full, source_rel in source_files['files']: + # copy files over. 
This happens first as directories that have + # a file do not need to be created later + + # We only follow symlinks for files in the non-recursive case + if source_files['directories']: + follow = False + else: + follow = boolean(self._task.args.get('follow', False), strict=False) + + module_return = self._copy_file(source_full, source_rel, content, content_tempfile, dest, task_vars, follow) + if module_return is None: + continue + + if module_return.get('failed'): + result.update(module_return) + return self._ensure_invocation(result) + + paths = os.path.split(source_rel) + dir_path = '' + for dir_component in paths: + os.path.join(dir_path, dir_component) + implicit_directories.add(dir_path) + if 'diff' in result and not result['diff']: + del result['diff'] + module_executed = True + changed = changed or module_return.get('changed', False) + + for src, dest_path in source_files['directories']: + # Find directories that are leaves as they might not have been + # created yet. + if dest_path in implicit_directories: + continue + + # Use file module to create these + new_module_args = _create_remote_file_args(self._task.args) + new_module_args['path'] = os.path.join(dest, dest_path) + new_module_args['state'] = 'directory' + new_module_args['mode'] = self._task.args.get('directory_mode', None) + new_module_args['recurse'] = False + del new_module_args['src'] + + module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars) + + if module_return.get('failed'): + result.update(module_return) + return self._ensure_invocation(result) + + module_executed = True + changed = changed or module_return.get('changed', False) + + for target_path, dest_path in source_files['symlinks']: + # Copy symlinks over + new_module_args = _create_remote_file_args(self._task.args) + new_module_args['path'] = os.path.join(dest, dest_path) + new_module_args['src'] = target_path + new_module_args['state'] = 'link' + 
new_module_args['force'] = True + + # Only follow remote symlinks in the non-recursive case + if source_files['directories']: + new_module_args['follow'] = False + + # file module cannot deal with 'preserve' mode and is meaningless + # for symlinks anyway, so just don't pass it. + if new_module_args.get('mode', None) == 'preserve': + new_module_args.pop('mode') + + module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars) + module_executed = True + + if module_return.get('failed'): + result.update(module_return) + return self._ensure_invocation(result) + + changed = changed or module_return.get('changed', False) + + if module_executed and len(source_files['files']) == 1: + result.update(module_return) + + # the file module returns the file path as 'path', but + # the copy module uses 'dest', so add it if it's not there + if 'path' in result and 'dest' not in result: + result['dest'] = result['path'] + else: + result.update(dict(dest=dest, src=source, changed=changed)) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + return self._ensure_invocation(result) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py new file mode 100644 index 0000000..2584fd3 --- /dev/null +++ b/lib/ansible/plugins/action/debug.py @@ -0,0 +1,80 @@ +# Copyright 2012, Dag Wieers <dag@wieers.com> +# Copyright 2016, Toshio Kuratomi <tkuratomi@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    ''' Print statements during execution '''

    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('msg', 'var', 'verbosity'))

    def run(self, tmp=None, task_vars=None):
        # Renders 'msg' or templates 'var' on the controller; no module is
        # executed on the target host.
        if task_vars is None:
            task_vars = dict()

        if 'msg' in self._task.args and 'var' in self._task.args:
            return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # get task verbosity; output is skipped unless the current display
        # verbosity is at least this high
        verbosity = int(self._task.args.get('verbosity', 0))

        if verbosity <= self._display.verbosity:
            if 'msg' in self._task.args:
                result['msg'] = self._task.args['msg']

            elif 'var' in self._task.args:
                try:
                    results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True)
                    if results == self._task.args['var']:
                        # if results is not str/unicode type, raise an exception
                        if not isinstance(results, string_types):
                            raise AnsibleUndefinedVariable
                        # If var name is same as result, try to template it
                        results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
                except AnsibleUndefinedVariable as e:
                    # an undefined variable is reported, not fatal
                    results = u"VARIABLE IS NOT DEFINED!"
                    if self._display.verbosity > 0:
                        results += u": %s" % to_text(e)

                if isinstance(self._task.args['var'], (list, dict)):
                    # If var is a list or dict, use the type as key to display
                    result[to_text(type(self._task.args['var']))] = results
                else:
                    result[self._task.args['var']] = results
            else:
                result['msg'] = 'Hello world!'

            # force flag to make debug output module always verbose
            result['_ansible_verbose_always'] = True
        else:
            result['skipped_reason'] = "Verbosity threshold not met."
            result['skipped'] = True

        # debug never fails; clear any inherited failed flag
        result['failed'] = False

        return result
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + ''' Fail with custom message ''' + + TRANSFERS_FILES = False + _VALID_ARGS = frozenset(('msg',)) + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + msg = 'Failed as requested from task' + if self._task.args and 'msg' in self._task.args: + msg = self._task.args.get('msg') + + result['failed'] = True + result['msg'] = msg + return result diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py new file mode 100644 index 0000000..992ba5a --- /dev/null +++ b/lib/ansible/plugins/action/fetch.py @@ -0,0 +1,207 @@ +# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import base64
from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe, is_subpath

display = Display()


class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations

        Copies a file from the remote host to the controller, either via the
        connection's fetch_file() or (when stat/checksum is unavailable, e.g.
        under become) via the slurp module. The result reports checksums and
        whether the local copy changed.
        '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            if self._play_context.check_mode:
                raise AnsibleActionSkip('check mode not (yet) supported for this module')

            source = self._task.args.get('src', None)
            # keep the user-supplied dest for the traversal guard below
            original_dest = dest = self._task.args.get('dest', None)
            flat = boolean(self._task.args.get('flat'), strict=False)
            fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
            validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)

            msg = ''
            # validate source and dest are strings FIXME: use basic.py and module specs
            if not isinstance(source, string_types):
                msg = "Invalid type supplied for source option, it must be a string"

            if not isinstance(dest, string_types):
                msg = "Invalid type supplied for dest option, it must be a string"

            # the None check last so "required" wins over the type messages
            if source is None or dest is None:
                msg = "src and dest are required"

            if msg:
                raise AnsibleActionFail(msg)

            source = self._connection._shell.join_path(source)
            source = self._remote_expand_user(source)

            remote_stat = {}
            remote_checksum = None
            if not self._connection.become:
                # Get checksum for the remote file. Don't bother if using become as slurp will be used.
                # Follow symlinks because fetch always follows symlinks
                try:
                    remote_stat = self._execute_remote_stat(source, all_vars=task_vars, follow=True)
                except AnsibleError as ae:
                    result['changed'] = False
                    result['file'] = source
                    if fail_on_missing:
                        result['failed'] = True
                        result['msg'] = to_text(ae)
                    else:
                        # best-effort mode: report but do not fail the task
                        result['msg'] = "%s, ignored" % to_text(ae, errors='surrogate_or_replace')

                    return result

            remote_checksum = remote_stat.get('checksum')
            if remote_stat.get('exists'):
                if remote_stat.get('isdir'):
                    result['failed'] = True
                    result['changed'] = False
                    result['msg'] = "remote file is a directory, fetch cannot work on directories"

                    # Historically, these don't fail because you may want to transfer
                    # a log file that possibly MAY exist but keep going to fetch other
                    # log files. Today, this is better achieved by adding
                    # ignore_errors or failed_when to the task. Control the behaviour
                    # via fail_when_missing
                    if not fail_on_missing:
                        result['msg'] += ", not transferring, ignored"
                        del result['changed']
                        del result['failed']

                    return result

            # use slurp if permissions are lacking or privilege escalation is needed
            remote_data = None
            if remote_checksum in (None, '1', ''):
                slurpres = self._execute_module(module_name='ansible.legacy.slurp', module_args=dict(src=source), task_vars=task_vars)
                if slurpres.get('failed'):
                    if not fail_on_missing:
                        result['file'] = source
                        result['changed'] = False
                    else:
                        result.update(slurpres)

                        # translate common slurp failures into fetch-specific messages
                        if 'not found' in slurpres.get('msg', ''):
                            result['msg'] = "the remote file does not exist, not transferring, ignored"
                        elif slurpres.get('msg', '').startswith('source is a directory'):
                            result['msg'] = "remote file is a directory, fetch cannot work on directories"

                    return result
                else:
                    if slurpres['encoding'] == 'base64':
                        remote_data = base64.b64decode(slurpres['content'])
                    if remote_data is not None:
                        remote_checksum = checksum_s(remote_data)

            # calculate the destination name
            if os.path.sep not in self._connection._shell.join_path('a', ''):
                # non-POSIX remote shell (e.g. Windows): normalise separators
                source = self._connection._shell._unquote(source)
                source_local = source.replace('\\', '/')
            else:
                source_local = source

            # ensure we only use file name, avoid relative paths
            # NOTE(review): at this point dest still equals original_dest
            # (nothing above modifies dest), so this guard looks vacuous here —
            # confirm against upstream whether it belongs after dest is built.
            if not is_subpath(dest, original_dest):
                # TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
                raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest))

            if flat:
                if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                    raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
                if dest.endswith(os.sep):
                    # if the path ends with "/", we'll use the source filename as the
                    # destination filename
                    base = os.path.basename(source_local)
                    dest = os.path.join(dest, base)
                if not dest.startswith("/"):
                    # if dest does not start with "/", we'll assume a relative path
                    dest = self._loader.path_dwim(dest)
            else:
                # files are saved in dest dir, with a subdir for each host, then the filename
                if 'inventory_hostname' in task_vars:
                    target_name = task_vars['inventory_hostname']
                else:
                    target_name = self._play_context.remote_addr
                dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

            dest = os.path.normpath(dest)

            # calculate checksum for the local file
            local_checksum = checksum(dest)

            if remote_checksum != local_checksum:
                # create the containing directories, if needed
                makedirs_safe(os.path.dirname(dest))

                # fetch the file and check for changes
                if remote_data is None:
                    self._connection.fetch_file(source, dest)
                else:
                    # content already obtained via slurp: write it locally
                    try:
                        f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
                        f.write(remote_data)
                        f.close()
                    except (IOError, OSError) as e:
                        raise AnsibleActionFail("Failed to fetch the file: %s" % e)
                new_checksum = secure_hash(dest)
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    new_md5 = md5(dest)
                except ValueError:
                    new_md5 = None

                if validate_checksum and new_checksum != remote_checksum:
                    result.update(dict(failed=True, md5sum=new_md5,
                                       msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
                                       checksum=new_checksum, remote_checksum=remote_checksum))
                else:
                    result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                                   'remote_md5sum': None, 'checksum': new_checksum,
                                   'remote_checksum': remote_checksum})
            else:
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    local_md5 = md5(dest)
                except ValueError:
                    local_md5 = None
                result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

        finally:
            # always clean up the remote temporary directory
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
            # must support gather_subset
            try:
                # prefer the public connection plugin name; older plugins only
                # expose the internal _load_name
                name = self._connection.ansible_name.removeprefix('ansible.netcommon.')
            except AttributeError:
                name = self._connection._load_name.split('.')[-1]
            if name not in ('network_cli', 'httpapi', 'netconf'):
                # not a network connection: setup-style options do not apply,
                # strip them and warn so the user knows they were ignored
                subset = mod_args.pop('gather_subset', None)
                if subset not in ('all', ['all']):
                    self._display.warning('Ignoring subset(%s) for %s' % (subset, fact_module))

                timeout = mod_args.pop('gather_timeout', None)
                if timeout is not None:
                    self._display.warning('Ignoring timeout(%s) for %s' % (timeout, fact_module))

                fact_filter = mod_args.pop('filter', None)
                if fact_filter is not None:
                    self._display.warning('Ignoring filter(%s) for %s' % (fact_filter, fact_module))

        # Strip out keys with ``None`` values, effectively mimicking ``omit`` behavior
        # This ensures we don't pass a ``None`` value as an argument expecting a specific type
        mod_args = dict((k, v) for k, v in mod_args.items() if v is not None)

        # handle module defaults
        resolved_fact_module = self._shared_loader_obj.module_loader.find_plugin_with_context(
            fact_module, collection_list=self._task.collections
        ).resolved_fqcn

        mod_args = get_action_args_with_defaults(
            resolved_fact_module, mod_args, self._task.module_defaults, self._templar,
            action_groups=self._task._parent._play._action_groups
        )

        return mod_args

    def _combine_task_result(self, result, task_result):
        # Merge one fact module's result into the running aggregate, keeping
        # only the fact/warning/deprecation payload.
        filtered_res = {
            'ansible_facts': task_result.get('ansible_facts', {}),
            'warnings': task_result.get('warnings', []),
            'deprecations': task_result.get('deprecations', []),
        }

        # on conflict the last plugin processed wins, but try to do deep merge and append to lists.
        return merge_hash(result, filtered_res, list_merge='append_rp')

    def run(self, tmp=None, task_vars=None):
        # Execute every configured facts module (either one-by-one or via
        # async background jobs) and aggregate results into
        # result['ansible_facts'].

        self._supports_check_mode = True

        result = super(ActionModule, self).run(tmp, task_vars)
        result['ansible_facts'] = {}

        # copy the value with list() so we don't mutate the config
        modules = list(C.config.get_config_value('FACTS_MODULES', variables=task_vars))

        # task arg / host var can force or forbid parallel execution
        parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
        if 'smart' in modules:
            # 'smart' resolves to the facts module matching the connection type
            connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
            network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
            modules.extend([connection_map.get(network_os or self._connection.ansible_name, 'ansible.legacy.setup')])
            modules.pop(modules.index('smart'))

        failed = {}
        skipped = {}

        if parallel is None and len(modules) >= 1:
            parallel = True
        else:
            parallel = boolean(parallel)

        # NOTE(review): the inline comments in the branches below describe the
        # opposite of the condition ('serially execute' under `if parallel:`)
        # — confirm against upstream whether the condition or the comments are
        # inverted before relying on this flag.
        if parallel:
            # serially execute each module
            for fact_module in modules:
                # just one module, no need for fancy async
                mod_args = self._get_module_args(fact_module, task_vars)
                res = self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=False)
                if res.get('failed', False):
                    failed[fact_module] = res
                elif res.get('skipped', False):
                    skipped[fact_module] = res
                else:
                    result = self._combine_task_result(result, res)

            self._remove_tmp_path(self._connection._shell.tmpdir)
        else:
            # do it async: kick off every module as a background job first
            jobs = {}
            for fact_module in modules:
                mod_args = self._get_module_args(fact_module, task_vars)
                self._display.vvvv("Running %s" % fact_module)
                jobs[fact_module] = (self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=True))

            # then poll async_status until every job reports finished
            while jobs:
                for module in jobs:
                    poll_args = {'jid': jobs[module]['ansible_job_id'], '_async_dir': os.path.dirname(jobs[module]['results_file'])}
                    res = self._execute_module(module_name='ansible.legacy.async_status', module_args=poll_args, task_vars=task_vars, wrap_async=False)
                    if res.get('finished', 0) == 1:
                        if res.get('failed', False):
                            failed[module] = res
                        elif res.get('skipped', False):
                            skipped[module] = res
                        else:
                            result = self._combine_task_result(result, res)
                        # restart iteration after mutating the dict
                        del jobs[module]
                        break
                    else:
                        time.sleep(0.1)
                else:
                    time.sleep(0.5)

        if skipped:
            result['msg'] = "The following modules were skipped: %s\n" % (', '.join(skipped.keys()))
            result['skipped_modules'] = skipped
            if len(skipped) == len(modules):
                # everything skipped -> the whole task is skipped
                result['skipped'] = True

        if failed:
            result['failed'] = True
            result['msg'] = "The following modules failed to execute: %s\n" % (', '.join(failed.keys()))
            result['failed_modules'] = failed

        # tell executor facts were gathered
        result['ansible_facts']['_ansible_facts_gathered'] = True

        # hack to keep --verbose from showing all the setup module result
        result['_ansible_verbose_override'] = True

        return result
If not, see <http://www.gnu.org/licenses/>. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.module_utils.six import string_types + + +class ActionModule(ActionBase): + ''' Create inventory groups based on variables ''' + + # We need to be able to modify the inventory + TRANSFERS_FILES = False + _VALID_ARGS = frozenset(('key', 'parents')) + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + if 'key' not in self._task.args: + result['failed'] = True + result['msg'] = "the 'key' param is required when using group_by" + return result + + group_name = self._task.args.get('key') + parent_groups = self._task.args.get('parents', ['all']) + if isinstance(parent_groups, string_types): + parent_groups = [parent_groups] + + result['changed'] = False + result['add_group'] = group_name.replace(' ', '-') + result['parent_groups'] = [name.replace(' ', '-') for name in parent_groups] + return result diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py new file mode 100644 index 0000000..3c3cb9e --- /dev/null +++ b/lib/ansible/plugins/action/include_vars.py @@ -0,0 +1,290 @@ +# Copyright: (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from os import path, walk +import re + +import ansible.constants as C +from ansible.errors import AnsibleError +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native, to_text +from ansible.plugins.action import ActionBase +from ansible.utils.vars import combine_vars + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = 
False

    # extensions accepted when scanning a directory
    VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
    # args valid only in dir mode / only in file mode / in both
    VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions', 'ignore_unknown_extensions']
    VALID_FILE_ARGUMENTS = ['file', '_raw_params']
    VALID_ALL = ['name', 'hash_behaviour']

    def _set_dir_defaults(self):
        # Normalise dir-mode options after _set_args() has populated them.
        if not self.depth:
            self.depth = 0  # 0 means "unlimited" in _traverse_dir_depth()

        if self.files_matching:
            self.matcher = re.compile(r'{0}'.format(self.files_matching))
        else:
            self.matcher = None

        if not self.ignore_files:
            self.ignore_files = list()

        if isinstance(self.ignore_files, string_types):
            self.ignore_files = self.ignore_files.split()

        elif isinstance(self.ignore_files, dict):
            # NOTE(review): this return value is discarded by the caller in
            # run(), so a dict-valued ignore_files is effectively accepted
            # silently — confirm intended behaviour.
            return {
                'failed': True,
                'message': '{0} must be a list'.format(self.ignore_files)
            }

    def _set_args(self):
        """ Set instance variables based on the arguments that were passed """

        self.hash_behaviour = self._task.args.get('hash_behaviour', None)
        self.return_results_as_name = self._task.args.get('name', None)
        self.source_dir = self._task.args.get('dir', None)
        self.source_file = self._task.args.get('file', None)
        if not self.source_dir and not self.source_file:
            # fall back to the free-form parameter as the file name
            self.source_file = self._task.args.get('_raw_params')
            if self.source_file:
                self.source_file = self.source_file.rstrip('\n')

        self.depth = self._task.args.get('depth', None)
        self.files_matching = self._task.args.get('files_matching', None)
        self.ignore_unknown_extensions = self._task.args.get('ignore_unknown_extensions', False)
        self.ignore_files = self._task.args.get('ignore_files', None)
        self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)

        # convert/validate extensions list
        if isinstance(self.valid_extensions, string_types):
            # NOTE(review): list(str) splits the string into single characters
            # ('yml' -> ['y', 'm', 'l']); presumably a split on a separator
            # was intended — confirm against upstream before changing.
            self.valid_extensions = list(self.valid_extensions)
        if not isinstance(self.valid_extensions, list):
            raise AnsibleError('Invalid type for "extensions" option, it must be a list')

    def run(self, tmp=None, task_vars=None):
        """ Load yml files recursively from a directory.
        """
        del tmp  # tmp no longer has any effect

        if task_vars is None:
            task_vars = dict()

        self.show_content = True
        self.included_files = []

        # Validate arguments: dir-mode and file-mode options must not be mixed
        dirs = 0
        files = 0
        for arg in self._task.args:
            if arg in self.VALID_DIR_ARGUMENTS:
                dirs += 1
            elif arg in self.VALID_FILE_ARGUMENTS:
                files += 1
            elif arg in self.VALID_ALL:
                pass
            else:
                raise AnsibleError('{0} is not a valid option in include_vars'.format(to_native(arg)))

        if dirs and files:
            raise AnsibleError("You are mixing file only and dir only arguments, these are incompatible")

        # set internal vars from args
        self._set_args()

        results = dict()
        failed = False
        if self.source_dir:
            self._set_dir_defaults()
            self._set_root_dir()
            if not path.exists(self.source_dir):
                failed = True
                err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
            elif not path.isdir(self.source_dir):
                failed = True
                err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
            else:
                # walk the tree, accumulating vars; stop at first failure
                for root_dir, filenames in self._traverse_dir_depth():
                    failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
                    if failed:
                        break
                    results.update(updated_results)
        else:
            try:
                # resolve the file relative to the usual vars search paths
                self.source_file = self._find_needle('vars', self.source_file)
                failed, err_msg, updated_results = (
                    self._load_files(self.source_file)
                )
                if not failed:
                    results.update(updated_results)

            except AnsibleError as e:
                failed = True
                err_msg = to_native(e)

        if self.return_results_as_name:
            # wrap all loaded vars under a single top-level name
            scope = dict()
            scope[self.return_results_as_name] = results
            results = scope

        result = super(ActionModule, self).run(task_vars=task_vars)

        if failed:
            result['failed'] = failed
            result['message'] = err_msg
        elif self.hash_behaviour is not None and self.hash_behaviour != C.DEFAULT_HASH_BEHAVIOUR:
            # per-task hash_behaviour override: merge or replace with existing vars
            merge_hashes = self.hash_behaviour == 'merge'
            for key, value in results.items():
                old_value = task_vars.get(key, None)
                results[key] = combine_vars(old_value, value, merge=merge_hashes)

        result['ansible_included_var_files'] = self.included_files
        result['ansible_facts'] = results
        # hide content from output when any loaded file was vault-protected
        result['_ansible_no_log'] = not self.show_content

        return result

    def _set_root_dir(self):
        # Resolve self.source_dir relative to the role (if any) or the
        # directory of the playbook/tasks file that declared the task.
        if self._task._role:
            if self.source_dir.split('/')[0] == 'vars':
                path_to_use = (
                    path.join(self._task._role._role_path, self.source_dir)
                )
                if path.exists(path_to_use):
                    self.source_dir = path_to_use
            else:
                path_to_use = (
                    path.join(
                        self._task._role._role_path, 'vars', self.source_dir
                    )
                )
                self.source_dir = path_to_use
        else:
            if hasattr(self._task._ds, '_data_source'):
                current_dir = (
                    "/".join(self._task._ds._data_source.split('/')[:-1])
                )
                self.source_dir = path.join(current_dir, self.source_dir)

    def _log_walk(self, error):
        # os.walk onerror hook: log and continue instead of aborting the walk
        self._display.vvv('Issue with walking through "%s": %s' % (to_native(error.filename), to_native(error)))

    def _traverse_dir_depth(self):
        """ Recursively iterate over a directory and sort the files in
            alphabetical order. Do not iterate pass the set depth.
            The default depth is unlimited.
        """
        current_depth = 0
        sorted_walk = list(walk(self.source_dir, onerror=self._log_walk))
        sorted_walk.sort(key=lambda x: x[0])
        for current_root, current_dir, current_files in sorted_walk:
            current_depth += 1
            if current_depth <= self.depth or self.depth == 0:
                current_files.sort()
                yield (current_root, current_files)
            else:
                break

    def _ignore_file(self, filename):
        """ Return True if a file matches the list of ignore_files.
        Args:
            filename (str): The filename that is being matched against.

        Returns:
            Boolean
        """
        for file_type in self.ignore_files:
            try:
                # each ignore entry is treated as a regex anchored at the end
                if re.search(r'{0}$'.format(file_type), filename):
                    return True
            except Exception:
                err_msg = 'Invalid regular expression: {0}'.format(file_type)
                raise AnsibleError(err_msg)
        return False

    def _is_valid_file_ext(self, source_file):
        """ Verify if source file has a valid extension
        Args:
            source_file (str): The full path of source file or source file.
        Returns:
            Bool
        """
        file_ext = path.splitext(source_file)
        return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)

    def _load_files(self, filename, validate_extensions=False):
        """ Loads a file and converts the output into a valid Python dict.
        Args:
            filename (str): The source file.

        Returns:
            Tuple (bool, str, dict)
        """
        results = dict()
        failed = False
        err_msg = ''
        if validate_extensions and not self._is_valid_file_ext(filename):
            failed = True
            err_msg = ('{0} does not have a valid extension: {1}'.format(to_native(filename), ', '.join(self.valid_extensions)))
        else:
            b_data, show_content = self._loader._get_file_contents(filename)
            data = to_text(b_data, errors='surrogate_or_strict')

            self.show_content = show_content
            data = self._loader.load(data, file_name=filename, show_content=show_content)
            if not data:
                # empty file: treat as an empty dict rather than None
                data = dict()
            if not isinstance(data, dict):
                failed = True
                err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
            else:
                self.included_files.append(filename)
                results.update(data)

        return failed, err_msg, results

    def _load_files_in_dir(self, root_dir, var_files):
        """ Load the found yml files and update/overwrite the dictionary.
        Args:
            root_dir (str): The base directory of the list of files that is being passed.
            var_files: (list): List of files to iterate over and load into a dictionary.

        Returns:
            Tuple (bool, str, dict)
        """
        results = dict()
        failed = False
        err_msg = ''
        for filename in var_files:
            stop_iter = False
            # Never include main.yml from a role, as that is the default included by the role
            if self._task._role:
                if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
                    stop_iter = True
                    continue

            filepath = path.join(root_dir, filename)
            if self.files_matching:
                if not self.matcher.search(filename):
                    stop_iter = True

            if not stop_iter and not failed:
                if self.ignore_unknown_extensions:
                    # silently skip files with unknown extensions
                    if path.exists(filepath) and not self._ignore_file(filename) and self._is_valid_file_ext(filename):
                        failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
                        if not failed:
                            results.update(loaded_data)
                else:
                    if path.exists(filepath) and not self._ignore_file(filename):
                        failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
                        if not failed:
                            results.update(loaded_data)

        return failed, err_msg, results
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash


class ActionModule(ActionBase):
    """Generic action: push the task's module to the target and run it."""

    def run(self, tmp=None, task_vars=None):

        # individual modules might disagree but as the generic the action plugin, pass at this point.
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if result.get('skipped'):
            return result

        # avoid passing to modules in case of no_log
        # should not be set anymore but here for backwards compatibility
        if result.get('invocation', {}).get('module_args'):
            del result['invocation']['module_args']

        # FUTURE: better to let _execute_module calculate this internally?
        wrap_async = self._task.async_val and not self._connection.has_native_async

        # do work!
        module_result = self._execute_module(task_vars=task_vars, wrap_async=wrap_async)
        result = merge_hash(result, module_result)

        # hack to keep --verbose from showing all the setup module result
        # moved from setup module as now we filter out all _ansible_ from result
        # FIXME: is this still accurate with gather_facts etc, or does it need support for FQ and other names?
        if self._task.action in C._ACTION_SETUP:
            result['_ansible_verbose_override'] = True

        if not wrap_async:
            # remove a temporary path we created
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionBase):
    """Dispatch the generic ``package`` action to the host's package-manager module."""

    TRANSFERS_FILES = False

    # names of the package manager modules that ship with ansible-core
    BUILTIN_PKG_MGR_MODULES = {manager['name'] for manager in PKG_MGRS}

    def run(self, tmp=None, task_vars=None):
        ''' handler for package operations

        Resolves which package-manager module to run — from the 'use' arg,
        from already-gathered facts, or by running a minimal setup — then
        executes it with this task's remaining arguments.
        '''

        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        module = self._task.args.get('use', 'auto')

        if module == 'auto':
            # first try to resolve the package manager from existing facts
            try:
                if self._task.delegate_to:  # if we delegate, we should use delegated host's facts
                    module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
                else:
                    module = self._templar.template('{{ansible_facts.pkg_mgr}}')
            except Exception:
                pass  # could not get it from template!

        try:
            if module == 'auto':
                # facts were unavailable: run a minimal setup to detect pkg_mgr
                facts = self._execute_module(
                    module_name='ansible.legacy.setup',
                    module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'),
                    task_vars=task_vars)
                display.debug("Facts %s" % facts)
                module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')

            if module != 'auto':
                if not self._shared_loader_obj.module_loader.has_plugin(module):
                    raise AnsibleActionFail('Could not find a module for %s.' % module)
                else:
                    # run the 'package' module
                    new_module_args = self._task.args.copy()
                    # 'use' is an argument of this action, not of the target module
                    if 'use' in new_module_args:
                        del new_module_args['use']

                    # get defaults for specific module
                    context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
                    new_module_args = get_action_args_with_defaults(
                        context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
                        action_groups=self._task._parent._play._action_groups
                    )

                    if module in self.BUILTIN_PKG_MGR_MODULES:
                        # prefix with ansible.legacy to eliminate external collisions while still allowing library/ override
                        module = 'ansible.legacy.' + module

                    display.vvvv("Running %s" % module)
                    result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
            else:
                raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')

        except AnsibleAction as e:
            # controlled action failures carry their own result payload
            result.update(e.result)
        finally:
            if not self._task.async_val:
                # remove a temporary path we created
                self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import datetime
import signal
import sys
import termios
import time
import tty

from os import (
    getpgrp,
    isatty,
    tcgetpgrp,
)
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display

display = Display()

try:
    import curses
    import io

    # Nest the try except since curses.error is not available if curses did not import
    try:
        curses.setupterm()
        HAS_CURSES = True
    except (curses.error, TypeError, io.UnsupportedOperation):
        HAS_CURSES = False
except ImportError:
    HAS_CURSES = False

# Fallback terminal control sequences, replaced by terminfo values when curses
# is usable for the current terminal.
MOVE_TO_BOL = b'\r'
CLEAR_TO_EOL = b'\x1b[K'
if HAS_CURSES:
    # curses.tigetstr() returns None in some circumstances
    MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
    CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL


def setraw(fd, when=termios.TCSAFLUSH):
    """Put terminal into a raw mode.

    Copied from ``tty`` from CPython 3.11.0, and modified to not remove OPOST from OFLAG

    OPOST is kept to prevent an issue with multi line prompts from being corrupted now that display
    is proxied via the queue from forks. The problem is a race condition, in that we proxy the display
    over the fork, but before it can be displayed, this plugin will have continued executing, potentially
    setting stdout and stdin to raw which remove output post processing that commonly converts NL to CRLF

    :param fd: file descriptor of the terminal to modify.
    :param when: when the change takes effect (a ``termios.TCSA*`` constant).
    """
    mode = termios.tcgetattr(fd)
    # disable input post-processing (flow control, CR->NL translation, parity)
    mode[tty.IFLAG] = mode[tty.IFLAG] & ~(termios.BRKINT | termios.ICRNL | termios.INPCK | termios.ISTRIP | termios.IXON)
    # deliberately NOT cleared (see docstring): keep output post-processing on
    # mode[tty.OFLAG] = mode[tty.OFLAG] & ~(termios.OPOST)
    mode[tty.CFLAG] = mode[tty.CFLAG] & ~(termios.CSIZE | termios.PARENB)
    mode[tty.CFLAG] = mode[tty.CFLAG] | termios.CS8
    # disable echo, canonical (line) mode, extended processing and signal chars
    mode[tty.LFLAG] = mode[tty.LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
    mode[tty.CC][termios.VMIN] = 1
    mode[tty.CC][termios.VTIME] = 0
    termios.tcsetattr(fd, when, mode)


class AnsibleTimeoutExceeded(Exception):
    # Raised by the SIGALRM handler when the pause duration elapses.
    pass


def timeout_handler(signum, frame):
    # SIGALRM handler installed by ActionModule.run for timed pauses.
    raise AnsibleTimeoutExceeded


def clear_line(stdout):
    # Move to beginning of line and clear to end of line.
    # NOTE(review): both writes prefix b'\x1b[' to sequences that may already
    # be complete control sequences (MOVE_TO_BOL defaults to b'\r',
    # CLEAR_TO_EOL to b'\x1b[K') — verify against terminfo expectations.
    stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
    stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)


def is_interactive(fd=None):
    """Return True when *fd* is a tty owned by the foreground process group.

    :param fd: file descriptor to test, or None (treated as non-interactive).
    """
    if fd is None:
        return False

    if isatty(fd):
        # Compare the current process group to the process group associated
        # with terminal of the given file descriptor to determine if the process
        # is running in the background.
        return getpgrp() == tcgetpgrp(fd)
    else:
        return False


class ActionModule(ActionBase):
    ''' pauses execution for a length or time, or until input is received '''

    # A pause applies to the whole play, not per-host.
    BYPASS_HOST_LOOP = True

    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module

        :returns: result dict including ``start``/``stop``/``delta`` timing,
            ``stdout`` summary and any ``user_input`` collected.
        :raises AnsibleError: when the user chooses to abort at the prompt.
        '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        validation_result, new_module_args = self.validate_argument_spec(
            argument_spec={
                'echo': {'type': 'bool', 'default': True},
                'minutes': {'type': int},  # Don't break backwards compat, allow floats, by using int callable
                'seconds': {'type': int},  # Don't break backwards compat, allow floats, by using int callable
                'prompt': {'type': 'str'},
            },
            mutually_exclusive=(
                ('minutes', 'seconds'),
            ),
        )

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        echo = new_module_args['echo']
        echo_prompt = ''
        result.update(dict(
            changed=False,
            rc=0,
            stderr='',
            stdout='',
            start=None,
            stop=None,
            delta=None,
            echo=echo
        ))

        # Add a note saying the output is hidden if echo is disabled
        if not echo:
            echo_prompt = ' (output is hidden)'

        if new_module_args['prompt']:
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), new_module_args['prompt'], echo_prompt)
        else:
            # If no custom prompt is specified, set a default prompt
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)

        if new_module_args['minutes'] is not None:
            seconds = new_module_args['minutes'] * 60
        elif new_module_args['seconds'] is not None:
            seconds = new_module_args['seconds']
            duration_unit = 'seconds'

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = to_text(datetime.datetime.now())
        # accumulated as bytes while in raw mode; decoded to text at the end
        result['user_input'] = b''

        stdin_fd = None
        old_settings = None
        try:
            if seconds is not None:
                # clamp to a minimum of one second so the alarm always fires
                if seconds < 1:
                    seconds = 1

                # setup the alarm handler
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(seconds)

                # show the timer and control prompts
                display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
                display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),

                # show the prompt specified in the task
                if new_module_args['prompt']:
                    display.display(prompt)

            else:
                display.display(prompt)

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            stdin_fd = None
            stdout_fd = None
            try:
                stdin = self._connection._new_stdin.buffer
                stdout = sys.stdout.buffer
                stdin_fd = stdin.fileno()
                stdout_fd = stdout.fileno()
            except (ValueError, AttributeError):
                # ValueError: someone is using a closed file descriptor as stdin
                # AttributeError: someone is using a null file descriptor as stdin on windoze
                stdin = None
            interactive = is_interactive(stdin_fd)
            if interactive:
                # grab actual Ctrl+C sequence
                try:
                    intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
                except Exception:
                    # unsupported/not present, use default
                    intr = b'\x03'  # value for Ctrl+C

                # get backspace sequences
                try:
                    backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
                except Exception:
                    backspace = [b'\x7f', b'\x08']

                old_settings = termios.tcgetattr(stdin_fd)
                setraw(stdin_fd)

                # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
                # stdout to a file since a file cannot be set to raw mode.
                if isatty(stdout_fd):
                    setraw(stdout_fd)

                # Only echo input if no timeout is specified
                if not seconds and echo:
                    new_settings = termios.tcgetattr(stdin_fd)
                    new_settings[3] = new_settings[3] | termios.ECHO
                    termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)

                # flush the buffer to make sure no previous key presses
                # are read in below
                termios.tcflush(stdin, termios.TCIFLUSH)

            while True:
                if not interactive:
                    if seconds is None:
                        display.warning("Not waiting for response to prompt as stdin is not interactive")
                    if seconds is not None:
                        # Give the signal handler enough time to timeout
                        time.sleep(seconds + 1)
                    break

                try:
                    key_pressed = stdin.read(1)

                    if key_pressed == intr:  # value for Ctrl+C
                        clear_line(stdout)
                        raise KeyboardInterrupt

                    if not seconds:
                        # read key presses and act accordingly
                        if key_pressed in (b'\r', b'\n'):
                            clear_line(stdout)
                            break
                        elif key_pressed in backspace:
                            # delete a character if backspace is pressed
                            result['user_input'] = result['user_input'][:-1]
                            clear_line(stdout)
                            if echo:
                                stdout.write(result['user_input'])
                            stdout.flush()
                        else:
                            result['user_input'] += key_pressed

                except KeyboardInterrupt:
                    # cancel any pending alarm before asking continue/abort
                    signal.alarm(0)
                    display.display("Press 'C' to continue the play or 'A' to abort \r"),
                    if self._c_or_a(stdin):
                        clear_line(stdout)
                        break

                    clear_line(stdout)

                    raise AnsibleError('user requested abort!')

        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup
            pass
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin stdin_fd
            if not (None in (stdin_fd, old_settings)) and isatty(stdin_fd):
                termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)

            duration = time.time() - start
            result['stop'] = to_text(datetime.datetime.now())
            result['delta'] = int(duration)

            if duration_unit == 'minutes':
                duration = round(duration / 60.0, 2)
            else:
                duration = round(duration, 2)
            result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
        return result

    def _c_or_a(self, stdin):
        # Block until the user presses 'c' (continue -> True) or 'a' (abort -> False).
        while True:
            key_pressed = stdin.read(1)
            if key_pressed.lower() == b'a':
                return False
            elif key_pressed.lower() == b'c':
                return True
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    """Run a raw command on the remote host, bypassing the module subsystem."""

    # Nothing is copied to the target; the command is executed directly.
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Execute the task's ``_raw_params`` via a low-level command.

        Returns the command result dict; marks the task failed on a
        non-zero return code, and skipped under ``--check`` mode.
        """
        task_vars = dict() if task_vars is None else task_vars

        # 'environment:' cannot be honoured here: there is no module wrapper
        # to export variables through, so warn instead of silently ignoring.
        if self._task.environment and any(self._task.environment):
            self._display.warning('raw module does not support the environment keyword')

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if self._play_context.check_mode:
            # in --check mode, always skip this module execution
            result['skipped'] = True
        else:
            raw_params = self._task.args.get('_raw_params')
            executable = self._task.args.get('executable', False)
            result.update(self._low_level_execute_command(raw_params, executable=executable))

            # a raw command is always assumed to have changed the host
            result['changed'] = True

            if result.get('rc', 0) != 0:
                result['failed'] = True
                result['msg'] = 'non-zero return code'

        return result
    # Nothing is transferred; all work happens via commands and modules.
    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset((
        'boot_time_command',
        'connect_timeout',
        'msg',
        'post_reboot_delay',
        'pre_reboot_delay',
        'reboot_command',
        'reboot_timeout',
        'search_paths',
        'test_command',
    ))

    # Defaults used when the corresponding task argument is absent.
    DEFAULT_REBOOT_TIMEOUT = 600
    DEFAULT_CONNECT_TIMEOUT = None
    DEFAULT_PRE_REBOOT_DELAY = 0
    DEFAULT_POST_REBOOT_DELAY = 0
    DEFAULT_TEST_COMMAND = 'whoami'
    DEFAULT_BOOT_TIME_COMMAND = 'cat /proc/sys/kernel/random/boot_id'
    DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible'
    DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
    DEFAULT_SHUTDOWN_COMMAND_ARGS = '-r {delay_min} "{message}"'
    DEFAULT_SUDOABLE = True

    DEPRECATED_ARGS = {}  # type: dict[str, str]

    # Per-platform overrides; looked up via _get_value_from_facts by
    # name+version, then name, then family, then the DEFAULT_* fallback.
    BOOT_TIME_COMMANDS = {
        'freebsd': '/sbin/sysctl kern.boottime',
        'openbsd': '/sbin/sysctl kern.boottime',
        'macosx': 'who -b',
        'solaris': 'who -b',
        'sunos': 'who -b',
        'vmkernel': 'grep booted /var/log/vmksummary.log | tail -n 1',
        'aix': 'who -b',
    }

    SHUTDOWN_COMMANDS = {
        'alpine': 'reboot',
        'vmkernel': 'reboot',
    }

    SHUTDOWN_COMMAND_ARGS = {
        'alpine': '',
        'void': '-r +{delay_min} "{message}"',
        'freebsd': '-r +{delay_sec}s "{message}"',
        'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
        'macosx': '-r +{delay_min} "{message}"',
        'openbsd': '-r +{delay_min} "{message}"',
        'solaris': '-y -g {delay_sec} -i 6 "{message}"',
        'sunos': '-y -g {delay_sec} -i 6 "{message}"',
        'vmkernel': '-d {delay_sec}',
        'aix': '-Fr',
    }

    TEST_COMMANDS = {
        'solaris': 'who',
        'vmkernel': 'who',
    }

    def __init__(self, *args, **kwargs):
        super(ActionModule, self).__init__(*args, **kwargs)

    @property
    def pre_reboot_delay(self):
        # seconds to wait before issuing the reboot (>= 0)
        return self._check_delay('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY)

    @property
    def post_reboot_delay(self):
        # seconds to wait after the reboot before validating (>= 0)
        return self._check_delay('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY)

    def _check_delay(self, key, default):
        """Ensure that the value is positive or zero"""
        # also accepts the legacy '<key>_sec' spelling of the argument
        value = int(self._task.args.get(key, self._task.args.get(key + '_sec', default)))
        if value < 0:
            value = 0
        return value

    def _get_value_from_facts(self, variable_name, distribution, default_value):
        """Get dist+version specific args first, then distribution, then family, lastly use default"""
        attr = getattr(self, variable_name)
        value = attr.get(
            distribution['name'] + distribution['version'],
            attr.get(
                distribution['name'],
                attr.get(
                    distribution['family'],
                    getattr(self, default_value))))
        return value

    def get_shutdown_command_args(self, distribution):
        """Return the argument string to append to the shutdown command.

        Uses the args portion of a user-supplied ``reboot_command`` when
        present; otherwise builds platform-specific args with the configured
        delay and message substituted in.
        """
        reboot_command = self._task.args.get('reboot_command')
        if reboot_command is not None:
            try:
                reboot_command = check_type_str(reboot_command, allow_conversion=False)
            except TypeError as e:
                raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))

            # No args were provided
            try:
                return reboot_command.split(' ', 1)[1]
            except IndexError:
                return ''
        else:
            args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')

            # Convert seconds to minutes. If less than 60, floor division yields 0.
            delay_min = self.pre_reboot_delay // 60
            reboot_message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
            return args.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=reboot_message)

    def get_distribution(self, task_vars):
        """Gather minimal facts and return {'name', 'version', 'family'} for the host.

        :raises AnsibleError: when the setup module fails or the expected
            facts are missing from its output.
        """
        # FIXME: only execute the module if we don't already have the facts we need
        distribution = {}
        display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
        module_output = self._execute_module(
            task_vars=task_vars,
            module_name='ansible.legacy.setup',
            module_args={'gather_subset': 'min'})
        try:
            if module_output.get('failed', False):
                raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
                    to_native(module_output['module_stdout']).strip(),
                    to_native(module_output['module_stderr']).strip()))
            # only the major version component is kept for lookup purposes
            distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
            distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
            distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
            display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
            return distribution
        except KeyError as ke:
            raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))

    def get_shutdown_command(self, task_vars, distribution):
        """Resolve the absolute path of the shutdown/reboot binary.

        A user-supplied ``reboot_command`` is used verbatim when absolute;
        otherwise the binary is located with the ``find`` module over
        ``search_paths``.

        :raises AnsibleError: on an invalid argument or when the binary
            cannot be found in any search path.
        """
        reboot_command = self._task.args.get('reboot_command')
        if reboot_command is not None:
            try:
                reboot_command = check_type_str(reboot_command, allow_conversion=False)
            except TypeError as e:
                raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
            shutdown_bin = reboot_command.split(' ', 1)[0]
        else:
            shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')

        if shutdown_bin[0] == '/':
            # already an absolute path; trust it as-is
            return shutdown_bin
        else:
            default_search_paths = ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
            search_paths = self._task.args.get('search_paths', default_search_paths)

            try:
                # Convert bare strings to a list
                search_paths = check_type_list(search_paths)
            except TypeError:
                err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
                raise AnsibleError(err_msg.format(search_paths))

            display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
                action=self._task.action,
                command=shutdown_bin,
                paths=search_paths))

            find_result = self._execute_module(
                task_vars=task_vars,
                # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
                module_name='ansible.legacy.find',
                module_args={
                    'paths': search_paths,
                    'patterns': [shutdown_bin],
                    'file_type': 'any'
                }
            )

            full_path = [x['path'] for x in find_result['files']]
            if not full_path:
                raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
            return full_path[0]

    def deprecated_args(self):
        # Warn (once per arg) about任何 deprecated arguments present on the task.
        # NOTE(review): DEPRECATED_ARGS is empty here; subclasses may populate it.
        for arg, version in self.DEPRECATED_ARGS.items():
            if self._task.args.get(arg) is not None:
                display.warning("Since Ansible {version}, {arg} is no longer a valid option for {action}".format(
                    version=version,
                    arg=arg,
                    action=self._task.action))

    def get_system_boot_time(self, distribution):
        """Run the platform boot-time command and return its stripped stdout.

        The returned string is an opaque marker (e.g. a boot UUID on Linux);
        a change in this value after reboot is what signals success.

        :raises AnsibleError: on an invalid ``boot_time_command`` value or a
            non-zero return code from the command.
        """
        boot_time_command = self._get_value_from_facts('BOOT_TIME_COMMANDS', distribution, 'DEFAULT_BOOT_TIME_COMMAND')
        if self._task.args.get('boot_time_command'):
            boot_time_command = self._task.args.get('boot_time_command')

        try:
            check_type_str(boot_time_command, allow_conversion=False)
        except TypeError as e:
            raise AnsibleError("Invalid value given for 'boot_time_command': %s." % to_native(e))

        display.debug("{action}: getting boot time with command: '{command}'".format(action=self._task.action, command=boot_time_command))
        command_result = self._low_level_execute_command(boot_time_command, sudoable=self.DEFAULT_SUDOABLE)

        if command_result['rc'] != 0:
            stdout = command_result['stdout']
            stderr = command_result['stderr']
            raise AnsibleError("{action}: failed to get host boot time info, rc: {rc}, stdout: {out}, stderr: {err}".format(
                action=self._task.action,
                rc=command_result['rc'],
                out=to_native(stdout),
                err=to_native(stderr)))
        display.debug("{action}: last boot time: {boot}".format(action=self._task.action, boot=command_result['stdout'].strip()))
        return command_result['stdout'].strip()

    def check_boot_time(self, distribution, previous_boot_time):
        """Raise unless the host reports a boot time different from *previous_boot_time*.

        Used as the retried action inside do_until_success_or_timeout; raising
        ValueError means "not rebooted yet, try again".
        """
        display.vvv("{action}: attempting to get system boot time".format(action=self._task.action))
        connect_timeout = self._task.args.get('connect_timeout', self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))

        # override connection timeout from defaults to custom value
        if connect_timeout:
            try:
                display.debug("{action}: setting connect_timeout to {value}".format(action=self._task.action, value=connect_timeout))
                self._connection.set_option("connection_timeout", connect_timeout)
                self._connection.reset()
            except AttributeError:
                display.warning("Connection plugin does not allow the connection timeout to be overridden")

        # try and get boot time
        # NOTE(review): this try/except re-raises unchanged and could be removed;
        # kept byte-identical here.
        try:
            current_boot_time = self.get_system_boot_time(distribution)
        except Exception as e:
            raise e

        # FreeBSD returns an empty string immediately before reboot so adding a length
        # check to prevent prematurely assuming system has rebooted
        if len(current_boot_time) == 0 or current_boot_time == previous_boot_time:
            raise ValueError("boot time has not changed")

    def run_test_command(self, distribution, **kwargs):
        """Run the post-reboot test command; raise RuntimeError on non-zero rc."""
        test_command = self._task.args.get('test_command', self._get_value_from_facts('TEST_COMMANDS', distribution, 'DEFAULT_TEST_COMMAND'))
        display.vvv("{action}: attempting post-reboot test command".format(action=self._task.action))
        display.debug("{action}: attempting post-reboot test command '{command}'".format(action=self._task.action, command=test_command))
        try:
            command_result = self._low_level_execute_command(test_command, sudoable=self.DEFAULT_SUDOABLE)
        except Exception:
            # may need to reset the connection in case another reboot occurred
            # which has invalidated our connection
            try:
                self._connection.reset()
            except AttributeError:
                pass
            raise

        if command_result['rc'] != 0:
            msg = 'Test command failed: {err} {out}'.format(
                err=to_native(command_result['stderr']),
                out=to_native(command_result['stdout']))
            raise RuntimeError(msg)

        display.vvv("{action}: system successfully rebooted".format(action=self._task.action))

    def do_until_success_or_timeout(self, action, reboot_timeout, action_desc, distribution, action_kwargs=None):
        """Repeatedly invoke *action* with exponential backoff until it succeeds.

        :param action: bound method to call with distribution and action_kwargs.
        :param reboot_timeout: overall deadline in seconds.
        :param action_desc: human-readable description for log messages.
        :raises TimedOutException: when the deadline passes without success.
        """
        max_end_time = datetime.utcnow() + timedelta(seconds=reboot_timeout)
        if action_kwargs is None:
            action_kwargs = {}

        fail_count = 0
        max_fail_sleep = 12

        while datetime.utcnow() < max_end_time:
            try:
                action(distribution=distribution, **action_kwargs)
                if action_desc:
                    display.debug('{action}: {desc} success'.format(action=self._task.action, desc=action_desc))
                return
            except Exception as e:
                if isinstance(e, AnsibleConnectionFailure):
                    try:
                        self._connection.reset()
                    except AnsibleConnectionFailure:
                        pass
                # Use exponential backoff with a max timeout, plus a little bit of randomness
                random_int = random.randint(0, 1000) / 1000
                fail_sleep = 2 ** fail_count + random_int
                if fail_sleep > max_fail_sleep:

                    fail_sleep = max_fail_sleep + random_int
                if action_desc:
                    try:
                        error = to_text(e).splitlines()[-1]
                    # NOTE(review): 'as e' shadows the outer exception, so on an
                    # IndexError the logged error is the IndexError, not the
                    # original failure — confirm intent upstream.
                    except IndexError as e:
                        error = to_text(e)
                    display.debug("{action}: {desc} fail '{err}', retrying in {sleep:.4} seconds...".format(
                        action=self._task.action,
                        desc=action_desc,
                        err=error,
                        sleep=fail_sleep))
                fail_count += 1
                time.sleep(fail_sleep)

        raise TimedOutException('Timed out waiting for {desc} (timeout={timeout})'.format(desc=action_desc, timeout=reboot_timeout))

    def perform_reboot(self, task_vars, distribution):
        """Issue the reboot command; return a dict with 'start' and 'failed'.

        A connection failure during the command is expected (the host may drop
        the connection while shutting down) and treated as success.
        """
        result = {}
        reboot_result = {}
        shutdown_command = self.get_shutdown_command(task_vars, distribution)
        shutdown_command_args = self.get_shutdown_command_args(distribution)
        reboot_command = '{0} {1}'.format(shutdown_command, shutdown_command_args)

        try:
            display.vvv("{action}: rebooting server...".format(action=self._task.action))
            display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
            reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
        except AnsibleConnectionFailure as e:
            # If the connection is closed too quickly due to the system being shutdown, carry on
            display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
            reboot_result['rc'] = 0

        result['start'] = datetime.utcnow()

        if reboot_result['rc'] != 0:
            result['failed'] = True
            result['rebooted'] = False
            result['msg'] = "Reboot command failed. Error was: '{stdout}, {stderr}'".format(
                stdout=to_native(reboot_result['stdout'].strip()),
                stderr=to_native(reboot_result['stderr'].strip()))
            return result

        result['failed'] = False
        return result

    def validate_reboot(self, distribution, original_connection_timeout=None, action_kwargs=None):
        """Wait for the host to come back: boot time changed and test command passes.

        Restores the original connection timeout once the host is reachable.
        Returns a result dict with 'rebooted'/'changed' (or 'failed' on timeout).
        """
        display.vvv('{action}: validating reboot'.format(action=self._task.action))
        result = {}

        try:
            # keep on checking system boot_time with short connection responses
            reboot_timeout = int(self._task.args.get('reboot_timeout', self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT)))

            self.do_until_success_or_timeout(
                action=self.check_boot_time,
                action_desc="last boot time check",
                reboot_timeout=reboot_timeout,
                distribution=distribution,
                action_kwargs=action_kwargs)

            # Get the connect_timeout set on the connection to compare to the original
            try:
                connect_timeout = self._connection.get_option('connection_timeout')
            except KeyError:
                pass
            else:
                if original_connection_timeout != connect_timeout:
                    try:
                        display.debug("{action}: setting connect_timeout back to original value of {value}".format(
                            action=self._task.action,
                            value=original_connection_timeout))
                        self._connection.set_option("connection_timeout", original_connection_timeout)
                        self._connection.reset()
                    except (AnsibleError, AttributeError) as e:
                        # reset the connection to clear the custom connection timeout
                        display.debug("{action}: failed to reset connection_timeout back to default: {error}".format(action=self._task.action,
                                                                                                                    error=to_text(e)))

            # finally run test command to ensure everything is working
            # FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
            self.do_until_success_or_timeout(
                action=self.run_test_command,
                action_desc="post-reboot test command",
                reboot_timeout=reboot_timeout,
                distribution=distribution,
                action_kwargs=action_kwargs)

            result['rebooted'] = True
            result['changed'] = True

        except TimedOutException as toex:
            result['failed'] = True
            result['rebooted'] = True
            result['msg'] = to_text(toex)
            return result

        return result

    def run(self, tmp=None, task_vars=None):
        """Entry point: reboot the host and wait for it to return.

        Refuses to run over a local connection (would reboot the controller),
        short-circuits in check mode, then: gather distribution, record the
        current boot time, issue the reboot, wait post_reboot_delay, and
        validate the host is back up. Returns a result dict with 'rebooted',
        'changed', 'elapsed' and failure details where applicable.
        """
        self._supports_check_mode = True
        self._supports_async = True

        # If running with local connection, fail so we don't reboot ourself
        if self._connection.transport == 'local':
            msg = 'Running {0} with local connection would reboot the control node.'.format(self._task.action)
            return {'changed': False, 'elapsed': 0, 'rebooted': False, 'failed': True, 'msg': msg}

        if self._play_context.check_mode:
            return {'changed': True, 'elapsed': 0, 'rebooted': True}

        if task_vars is None:
            task_vars = {}

        self.deprecated_args()

        result = super(ActionModule, self).run(tmp, task_vars)

        if result.get('skipped', False) or result.get('failed', False):
            return result

        distribution = self.get_distribution(task_vars)

        # Get current boot time
        try:
            previous_boot_time = self.get_system_boot_time(distribution)
        except Exception as e:
            result['failed'] = True
            result['reboot'] = False
            result['msg'] = to_text(e)
            return result

        # Get the original connection_timeout option var so it can be reset after
        original_connection_timeout = None
        try:
            original_connection_timeout = self._connection.get_option('connection_timeout')
            display.debug("{action}: saving original connect_timeout of {timeout}".format(action=self._task.action, timeout=original_connection_timeout))
        except KeyError:
            display.debug("{action}: connect_timeout connection option has not been set".format(action=self._task.action))
        # Initiate reboot
        reboot_result = self.perform_reboot(task_vars, distribution)

        if reboot_result['failed']:
            result = reboot_result
            elapsed = datetime.utcnow() - reboot_result['start']
            result['elapsed'] = elapsed.seconds
            return result

        if self.post_reboot_delay != 0:
            display.debug("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
            display.vvv("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
            time.sleep(self.post_reboot_delay)

        # Make sure reboot was successful
        result = self.validate_reboot(distribution, original_connection_timeout, action_kwargs={'previous_boot_time': previous_boot_time})

        elapsed = datetime.utcnow() - reboot_result['start']
        result['elapsed'] = elapsed.seconds

        return result
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shlex

from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    # Action side of the 'script' module: copies a local script to the remote
    # host, fixes up permissions, and runs it in a single low-level command.

    TRANSFERS_FILES = True

    # On Windows platform, absolute paths begin with a (back)slash
    # after chopping off a potential drive letter.
    windows_absolute_path_detection = re.compile(r'^(?:[a-zA-Z]\:)?(\\|\/)')

    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            creates = self._task.args.get('creates')
            if creates:
                # do not run the command if the line contains creates=filename
                # and the filename already exists. This allows idempotence
                # of command executions.
                if self._remote_file_exists(creates):
                    raise AnsibleActionSkip("%s exists, matching creates option" % creates)

            removes = self._task.args.get('removes')
            if removes:
                # do not run the command if the line contains removes=filename
                # and the filename does not exist. This allows idempotence
                # of command executions.
                if not self._remote_file_exists(removes):
                    raise AnsibleActionSkip("%s does not exist, matching removes option" % removes)

            # The chdir must be absolute, because a relative path would rely on
            # remote node behaviour & user config.
            chdir = self._task.args.get('chdir')
            if chdir:
                # Powershell is the only Windows-path aware shell
                if getattr(self._connection._shell, "_IS_WINDOWS", False) and \
                        not self.windows_absolute_path_detection.match(chdir):
                    raise AnsibleActionFail('chdir %s must be an absolute path for a Windows remote node' % chdir)
                # Every other shell is unix-path-aware.
                if not getattr(self._connection._shell, "_IS_WINDOWS", False) and not chdir.startswith('/'):
                    raise AnsibleActionFail('chdir %s must be an absolute path for a Unix-aware remote node' % chdir)

            # Split out the script as the first item in raw_params using
            # shlex.split() in order to support paths and files with spaces in the name.
            # Any arguments passed to the script will be added back later.
            raw_params = to_native(self._task.args.get('_raw_params', ''), errors='surrogate_or_strict')
            parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
            source = parts[0]

            # Support executable paths and files with spaces in the name.
            executable = to_native(self._task.args.get('executable', ''), errors='surrogate_or_strict')

            try:
                # _find_needle resolves the script relative to the play's 'files'
                # search path; get_real_file also handles vault decryption.
                source = self._loader.get_real_file(self._find_needle('files', source), decrypt=self._task.args.get('decrypt', True))
            except AnsibleError as e:
                raise AnsibleActionFail(to_native(e))

            if self._task.check_mode:
                # check mode is supported if 'creates' or 'removes' are provided
                # the task has already been skipped if a change would not occur
                if self._task.args.get('creates') or self._task.args.get('removes'):
                    result['changed'] = True
                    raise _AnsibleActionDone(result=result)
                # If the script doesn't return changed in the result, it defaults to True,
                # but since the script may override 'changed', just skip instead of guessing.
                else:
                    result['changed'] = False
                    raise AnsibleActionSkip('Check mode is not supported for this task.', result=result)

            # now we execute script, always assume changed.
            result['changed'] = True

            # transfer the file to a remote tmp location
            tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
                                                        os.path.basename(source))

            # Convert raw_params to text for the purpose of replacing the script since
            # parts and tmp_src are both unicode strings and raw_params will be different
            # depending on Python version.
            #
            # Once everything is encoded consistently, replace the script path on the remote
            # system with the remainder of the raw_params. This preserves quoting in parameters
            # that would have been removed by shlex.split().
            target_command = to_text(raw_params).strip().replace(parts[0], tmp_src)

            self._transfer_file(source, tmp_src)

            # set file permissions, more permissive when the copy is done as a different user
            self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)

            # add preparation steps to one ssh roundtrip executing the script
            env_dict = dict()
            env_string = self._compute_environment_string(env_dict)

            if executable:
                script_cmd = ' '.join([env_string, executable, target_command])
            else:
                script_cmd = ' '.join([env_string, target_command])

            script_cmd = self._connection._shell.wrap_for_exec(script_cmd)

            exec_data = None
            # PowerShell runs the script in a special wrapper to enable things
            # like become and environment args
            if getattr(self._connection._shell, "_IS_WINDOWS", False):
                # FUTURE: use a more public method to get the exec payload
                pc = self._play_context
                exec_data = ps_manifest._create_powershell_wrapper(
                    to_bytes(script_cmd), source, {}, env_dict, self._task.async_val,
                    pc.become, pc.become_method, pc.become_user,
                    pc.become_pass, pc.become_flags, "script", task_vars, None
                )
                # build the necessary exec wrapper command
                # FUTURE: this still doesn't let script work on Windows with non-pipelined connections or
                # full manual exec of KEEP_REMOTE_FILES
                script_cmd = self._connection._shell.build_module_command(env_string='', shebang='#!powershell', cmd='')

            # Single round-trip: the wrapped command (and, on Windows, the
            # pipelined exec_data payload) is executed on the remote host.
            result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir))

            if 'rc' in result and result['rc'] != 0:
                raise AnsibleActionFail('non-zero return code')

        except AnsibleAction as e:
            # Skip/Done/Fail carry a partial result dict; merge it in.
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result


# --- lib/ansible/plugins/action/service.py (license header) ---
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    # Action side of the 'service' meta-module: detects the target host's
    # service manager and dispatches to the matching backend module.

    TRANSFERS_FILES = False

    # Parameters that specific backends do not understand and must be stripped
    # (with a warning) before dispatch.
    UNUSED_PARAMS = {
        'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
    }

    # HACK: list of unqualified service manager names that are/were built-in, we'll prefix these with `ansible.legacy` to
    # avoid collisions with collections search
    BUILTIN_SVC_MGR_MODULES = set(['openwrt_init', 'service', 'systemd', 'sysvinit'])

    def run(self, tmp=None, task_vars=None):
        ''' handler for package operations '''

        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        module = self._task.args.get('use', 'auto').lower()

        # First detection attempt: the already-gathered service_mgr fact.
        if module == 'auto':
            try:
                if self._task.delegate_to:  # if we delegate, we should use delegated host's facts
                    module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
                else:
                    module = self._templar.template('{{ansible_facts.service_mgr}}')
            except Exception:
                pass  # could not get it from template!

        try:
            # Second detection attempt: run a minimal setup to gather only the
            # service_mgr fact on the remote host.
            if module == 'auto':
                facts = self._execute_module(
                    module_name='ansible.legacy.setup',
                    module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
                self._display.debug("Facts %s" % facts)
                module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')

            # Fall back to the generic 'service' module when detection failed
            # or no plugin exists for the detected manager.
            if not module or module == 'auto' or not self._shared_loader_obj.module_loader.has_plugin(module):
                module = 'ansible.legacy.service'

            if module != 'auto':
                # run the 'service' module
                new_module_args = self._task.args.copy()
                if 'use' in new_module_args:
                    del new_module_args['use']

                if module in self.UNUSED_PARAMS:
                    for unused in self.UNUSED_PARAMS[module]:
                        if unused in new_module_args:
                            del new_module_args[unused]
                            self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))

                # get defaults for specific module
                context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
                new_module_args = get_action_args_with_defaults(
                    context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
                    action_groups=self._task._parent._play._action_groups
                )

                # collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
                if module in self.BUILTIN_SVC_MGR_MODULES:
                    module = 'ansible.legacy.' + module

                self._display.vvvv("Running %s" % module)
                result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
            else:
                raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')

        except AnsibleAction as e:
            result.update(e.result)
        finally:
            # async tasks clean their own tmp path when they finish
            if not self._task.async_val:
                self._remove_tmp_path(self._connection._shell.tmpdir)

        return result


# --- lib/ansible/plugins/action/set_fact.py (license header) ---
# Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleActionFail
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier

import ansible.constants as C


class ActionModule(ActionBase):
    # Action side of 'set_fact': turns the task's key/value arguments into
    # host facts; nothing is executed on the remote host.

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        task_vars = dict() if task_vars is None else task_vars

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # 'cacheable' is consumed here and never forwarded as a fact.
        cacheable = boolean(self._task.args.pop('cacheable', False))

        if not self._task.args:
            raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')

        facts = {}
        for raw_key, value in self._task.args.items():
            key = self._templar.template(raw_key)

            if not isidentifier(key):
                raise AnsibleActionFail("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                        "and contain only letters, numbers and underscores." % key)

            # NOTE: this should really use BOOLEANS from convert_bool, but only in the k=v case,
            # right now it converts matching explicit YAML strings also when 'jinja2_native' is disabled.
            looks_boolean = isinstance(value, string_types) and value.lower() in ('true', 'false', 'yes', 'no')
            if looks_boolean and not C.DEFAULT_JINJA2_NATIVE:
                value = boolean(value, strict=False)
            facts[key] = value

        if not facts:
            # this should not happen, but JIC we get here
            raise AnsibleActionFail('Unable to create any variables with provided arguments')

        # just as _facts actions, we don't set changed=true as we are not modifying the actual host
        result['ansible_facts'] = facts
        result['_ansible_facts_cacheable'] = cacheable

        return result


# --- lib/ansible/plugins/action/set_stats.py (license header) ---
# Copyright 2016 Ansible (RedHat, Inc)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier


class ActionModule(ActionBase):
    # Action side of 'set_stats': records playbook statistics for callbacks
    # (e.g. the junit/summary callbacks); runs entirely on the controller.

    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('aggregate', 'data', 'per_host'))

    # TODO: document this in non-empty set_stats.py module
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # defaults for the stats envelope; 'data' holds the user's key/values
        stats = {'data': {}, 'per_host': False, 'aggregate': True}

        if self._task.args:
            data = self._task.args.get('data', {})

            if not isinstance(data, dict):
                # 'data' may be a template expression resolving to a dict
                data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)

            if not isinstance(data, dict):
                result['failed'] = True
                result['msg'] = "The 'data' option needs to be a dictionary/hash"
                return result

            # set boolean options, defaults are set above in stats init
            for opt in ['per_host', 'aggregate']:
                val = self._task.args.get(opt, None)
                if val is not None:
                    if not isinstance(val, bool):
                        stats[opt] = boolean(self._templar.template(val), strict=False)
                    else:
                        stats[opt] = val

            for (k, v) in data.items():

                k = self._templar.template(k)

                if not isidentifier(k):
                    result['failed'] = True
                    result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                     "letters, numbers and underscores." % k)
                    return result

                stats['data'][k] = self._templar.template(v)

        result['changed'] = False
        result['ansible_stats'] = stats

        return result


# --- lib/ansible/plugins/action/shell.py ---
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    # 'shell' is a thin alias: it delegates to the 'command' action with the
    # _uses_shell flag set, so the remote command runs through a shell.

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        # Shell module is implemented via command with a special arg
        self._task.args['_uses_shell'] = True

        command_action = self._shared_loader_obj.action_loader.get('ansible.legacy.command',
                                                                   task=self._task,
                                                                   connection=self._connection,
                                                                   play_context=self._play_context,
                                                                   loader=self._loader,
                                                                   templar=self._templar,
                                                                   shared_loader_obj=self._shared_loader_obj)
        result = command_action.run(task_vars=task_vars)

        return result


# --- lib/ansible/plugins/action/template.py (header and first imports) ---
# Copyright: (c) 2015, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import shutil
import stat
import tempfile

from ansible import constants as C
from ansible.config.manager import ensure_type
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
from ansible.template import generate_ansible_template_vars, AnsibleEnvironment


class ActionModule(ActionBase):
    # Action side of 'template': renders the Jinja2 template locally on the
    # controller, then delegates the transfer to the 'copy' action.

    TRANSFERS_FILES = True
    DEFAULT_NEWLINE_SEQUENCE = "\n"

    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Options type validation
        # stings
        for s_type in ('src', 'dest', 'state', 'newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                       'block_end_string', 'comment_start_string', 'comment_end_string'):
            if s_type in self._task.args:
                value = ensure_type(self._task.args[s_type], 'string')
                if value is not None and not isinstance(value, string_types):
                    raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                self._task.args[s_type] = value

        # booleans
        try:
            follow = boolean(self._task.args.get('follow', False), strict=False)
            trim_blocks = boolean(self._task.args.get('trim_blocks', True), strict=False)
            lstrip_blocks = boolean(self._task.args.get('lstrip_blocks', False), strict=False)
        except TypeError as e:
            raise AnsibleActionFail(to_native(e))

        # assign to local vars for ease of use
        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        state = self._task.args.get('state', None)
        newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
        variable_start_string = self._task.args.get('variable_start_string', None)
        variable_end_string = self._task.args.get('variable_end_string', None)
        block_start_string = self._task.args.get('block_start_string', None)
        block_end_string = self._task.args.get('block_end_string', None)
        comment_start_string = self._task.args.get('comment_start_string', None)
        comment_end_string = self._task.args.get('comment_end_string', None)
        output_encoding = self._task.args.get('output_encoding', 'utf-8') or 'utf-8'

        wrong_sequences = ["\\n", "\\r", "\\r\\n"]
        allowed_sequences = ["\n", "\r", "\r\n"]

        # We need to convert unescaped sequences to proper escaped sequences for Jinja2
        if newline_sequence in wrong_sequences:
            newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]

        try:
            # logical validation
            if state is not None:
                raise AnsibleActionFail("'state' cannot be specified on a template")
            elif source is None or dest is None:
                raise AnsibleActionFail("src and dest are required")
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
            else:
                try:
                    source = self._find_needle('templates', source)
                except AnsibleError as e:
                    raise AnsibleActionFail(to_text(e))

            mode = self._task.args.get('mode', None)
            if mode == 'preserve':
                # 'preserve' means: give the rendered file the local template's mode
                mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail("Template source files must be utf-8 encoded")

                # set jinja2 internal search path for includes
                searchpath = task_vars.get('ansible_search_path', [])
                searchpath.extend([self._loader._basedir, os.path.dirname(source)])

                # We want to search into the 'templates' subdir of each search path in
                # addition to our original search paths.
                newsearchpath = []
                for p in searchpath:
                    newsearchpath.append(os.path.join(p, 'templates'))
                    newsearchpath.append(p)
                searchpath = newsearchpath

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                # NOTE in the case of ANSIBLE_DEBUG=1 task_vars is VarsWithSources(MutableMapping)
                # so | operator cannot be used as it can be used only on dicts
                # https://peps.python.org/pep-0584/#what-about-mapping-and-mutablemapping
                temp_vars.update(generate_ansible_template_vars(self._task.args.get('src', None), source, dest))

                # force templar to use AnsibleEnvironment to prevent issues with native types
                # https://github.com/ansible/ansible/issues/46169
                templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment,
                                                          searchpath=searchpath,
                                                          newline_sequence=newline_sequence,
                                                          block_start_string=block_start_string,
                                                          block_end_string=block_end_string,
                                                          variable_start_string=variable_start_string,
                                                          variable_end_string=variable_end_string,
                                                          comment_start_string=comment_start_string,
                                                          comment_end_string=comment_end_string,
                                                          trim_blocks=trim_blocks,
                                                          lstrip_blocks=lstrip_blocks,
                                                          available_variables=temp_vars)
                resultant = templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

            new_task = self._task.copy()
            # mode is either the mode from task.args or the mode of the source file if the task.args
            # mode == 'preserve'
            new_task.args['mode'] = mode

            # remove 'template only' options:
            for remove in ('newline_sequence', 'block_start_string', 'block_end_string', 'variable_start_string', 'variable_end_string',
                           'comment_start_string', 'comment_end_string', 'trim_blocks', 'lstrip_blocks', 'output_encoding'):
                new_task.args.pop(remove, None)

            local_tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)

            try:
                # write the rendered output to a controller-local temp file and
                # hand it to the 'copy' action for the actual transfer
                result_file = os.path.join(local_tempdir, os.path.basename(source))
                with open(to_bytes(result_file, errors='surrogate_or_strict'), 'wb') as f:
                    f.write(to_bytes(resultant, encoding=output_encoding, errors='surrogate_or_strict'))

                new_task.args.update(
                    dict(
                        src=result_file,
                        dest=dest,
                        follow=follow,
                    ),
                )
                # call with ansible.legacy prefix to eliminate collisions with collections while still allowing local override
                copy_action = self._shared_loader_obj.action_loader.get('ansible.legacy.copy',
                                                                        task=new_task,
                                                                        connection=self._connection,
                                                                        play_context=self._play_context,
                                                                        loader=self._loader,
                                                                        templar=self._templar,
                                                                        shared_loader_obj=self._shared_loader_obj)
                result.update(copy_action.run(task_vars=task_vars))
            finally:
                shutil.rmtree(to_bytes(local_tempdir, errors='surrogate_or_strict'))

        except AnsibleAction as e:
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result


# --- lib/ansible/plugins/action/unarchive.py (license header) ---
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    # Action side of 'unarchive': optionally transfers the archive to the
    # remote host, then runs the unarchive module there.

    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        ''' handler for unarchive operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
        creates = self._task.args.get('creates', None)
        decrypt = self._task.args.get('decrypt', True)

        try:
            # "copy" is deprecated in favor of "remote_src".
            if 'copy' in self._task.args:
                # They are mutually exclusive.
                if 'remote_src' in self._task.args:
                    raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
                # We will take the information from copy and store it in
                # the remote_src var to use later in this file.
                self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)

            if source is None or dest is None:
                raise AnsibleActionFail("src (or content) and dest are required")

            if creates:
                # do not run the command if the line contains creates=filename
                # and the filename already exists. This allows idempotence
                # of command executions.
                creates = self._remote_expand_user(creates)
                if self._remote_file_exists(creates):
                    raise AnsibleActionSkip("skipped, since %s exists" % creates)

            dest = self._remote_expand_user(dest)  # CCTODO: Fix path for Windows hosts.
            source = os.path.expanduser(source)

            if not remote_src:
                try:
                    # resolve the archive via the play's 'files' search path,
                    # decrypting vaulted sources if requested
                    source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
                except AnsibleError as e:
                    raise AnsibleActionFail(to_text(e))

            try:
                remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            if not remote_stat['exists'] or not remote_stat['isdir']:
                raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)

            if not remote_src:
                # transfer the file to a remote tmp location
                tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
                self._transfer_file(source, tmp_src)

            # handle diff mode client side
            # handle check mode client side

            # remove action plugin only keys
            new_module_args = self._task.args.copy()
            for key in ('decrypt',):
                if key in new_module_args:
                    del new_module_args[key]

            if not remote_src:
                # fix file permissions when the copy is done as a different user
                self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
                new_module_args['src'] = tmp_src

            # execute the unarchive module now, with the updated args (using ansible.legacy prefix to eliminate collections
            # collisions with local override
            result.update(self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars))
        except AnsibleAction as e:
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)
        return result


# --- lib/ansible/plugins/action/uri.py (header) ---
# -*- coding: utf-8 -*-
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native
from ansible.module_utils.common.collections import Mapping, MutableMapping
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import text_type
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    # Action side of 'uri': transfers any local 'src' / form-multipart files
    # to the remote host before running the uri module there.

    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        self._supports_async = True

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        body_format = self._task.args.get('body_format', 'raw')
        body = self._task.args.get('body')
        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)

        try:
            if remote_src:
                # everything is remote, so we just execute the module
                # without changing any of the module arguments
                # call with ansible.legacy prefix to prevent collections collisions while allowing local override
                raise _AnsibleActionDone(result=self._execute_module(module_name='ansible.legacy.uri',
                                                                     task_vars=task_vars, wrap_async=self._task.async_val))

            kwargs = {}

            if src:
                try:
                    src = self._find_needle('files', src)
                except AnsibleError as e:
                    raise AnsibleActionFail(to_native(e))

                tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
                kwargs['src'] = tmp_src
                self._transfer_file(src, tmp_src)
                self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
            elif body_format == 'form-multipart':
                if not isinstance(body, Mapping):
                    raise AnsibleActionFail(
                        'body must be mapping, cannot be type %s' % body.__class__.__name__
                    )
                for field, value in body.items():
                    if not isinstance(value, MutableMapping):
                        continue
                    content = value.get('content')
                    filename = value.get('filename')
                    # a field with inline 'content' (or no filename) needs no transfer
                    if not filename or content:
                        continue

                    try:
                        filename = self._find_needle('files', filename)
                    except AnsibleError as e:
                        raise AnsibleActionFail(to_native(e))

                    tmp_src = self._connection._shell.join_path(
                        self._connection._shell.tmpdir,
                        os.path.basename(filename)
                    )
                    # point the form field at the remote copy we just made
                    value['filename'] = tmp_src
                    self._transfer_file(filename, tmp_src)
                    self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
                kwargs['body'] = body

            new_module_args = self._task.args | kwargs

            # call with ansible.legacy prefix to prevent collections collisions while allowing local override
            result.update(self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
        except AnsibleAction as e:
            result.update(e.result)
        finally:
            if not self._task.async_val:
                self._remove_tmp_path(self._connection._shell.tmpdir)
        return result


# --- lib/ansible/plugins/action/validate_argument_spec.py (header and imports) ---
# Copyright 2021 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.module_utils.errors import AnsibleValidationErrorMultiple
from ansible.utils.vars import combine_vars
class ActionModule(ActionBase):
    ''' Validate an arg spec'''

    TRANSFERS_FILES = False

    def get_args_from_task_vars(self, argument_spec, task_vars):
        '''
        Get any arguments that may come from `task_vars`.

        Expand templated variables so we can validate the actual values.

        :param argument_spec: A dict of the argument spec.
        :param task_vars: A dict of task variables.

        :returns: A dict of values that can be validated against the arg spec.
        '''
        args = {}

        for argument_name, argument_attrs in argument_spec.items():
            if argument_name in task_vars:
                args[argument_name] = task_vars[argument_name]
        # template the collected values so validation sees resolved data
        args = self._templar.template(args)
        return args

    def run(self, tmp=None, task_vars=None):
        '''
        Validate an argument specification against a provided set of data.

        The `validate_argument_spec` module expects to receive the arguments:
            - argument_spec: A dict whose keys are the valid argument names, and
                  whose values are dicts of the argument attributes (type, etc).
            - provided_arguments: A dict whose keys are the argument names, and
                  whose values are the argument value.

        :param tmp: Deprecated. Do not use.
        :param task_vars: A dict of task variables.

        :return: An action result dict, including a 'argument_errors' key with a
            list of validation errors found.
        '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # This action can be called from anywhere, so pass in some info about what it is
        # validating args for so the error results make some sense
        result['validate_args_context'] = self._task.args.get('validate_args_context', {})

        if 'argument_spec' not in self._task.args:
            raise AnsibleError('"argument_spec" arg is required in args: %s' % self._task.args)

        # Get the task var called argument_spec. This will contain the arg spec
        # data dict (for the proper entry point for a role).
        argument_spec_data = self._task.args.get('argument_spec')

        # the values that were passed in and will be checked against argument_spec
        provided_arguments = self._task.args.get('provided_arguments', {})

        if not isinstance(argument_spec_data, dict):
            raise AnsibleError('Incorrect type for argument_spec, expected dict and got %s' % type(argument_spec_data))

        if not isinstance(provided_arguments, dict):
            raise AnsibleError('Incorrect type for provided_arguments, expected dict and got %s' % type(provided_arguments))

        args_from_vars = self.get_args_from_task_vars(argument_spec_data, task_vars)
        validator = ArgumentSpecValidator(argument_spec_data)
        # explicit 'provided_arguments' win over values picked up from task_vars
        validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments), validate_role_argument_spec=True)

        if validation_result.error_messages:
            result['failed'] = True
            result['msg'] = 'Validation of arguments failed:\n%s' % '\n'.join(validation_result.error_messages)
            result['argument_spec_data'] = argument_spec_data
            result['argument_errors'] = validation_result.error_messages
            return result

        result['changed'] = False
        result['msg'] = 'The arg spec validation passed'

        return result


# --- lib/ansible/plugins/action/wait_for_connection.py (license header, continues below) ---
# (c) 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# CI-required python3 boilerplate +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import time +from datetime import datetime, timedelta + +from ansible.module_utils._text import to_text +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display + +display = Display() + + +class TimedOutException(Exception): + pass + + +class ActionModule(ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset(('connect_timeout', 'delay', 'sleep', 'timeout')) + + DEFAULT_CONNECT_TIMEOUT = 5 + DEFAULT_DELAY = 0 + DEFAULT_SLEEP = 1 + DEFAULT_TIMEOUT = 600 + + def do_until_success_or_timeout(self, what, timeout, connect_timeout, what_desc, sleep=1): + max_end_time = datetime.utcnow() + timedelta(seconds=timeout) + + e = None + while datetime.utcnow() < max_end_time: + try: + what(connect_timeout) + if what_desc: + display.debug("wait_for_connection: %s success" % what_desc) + return + except Exception as e: + error = e # PY3 compatibility to store exception for use outside of this block + if what_desc: + display.debug("wait_for_connection: %s fail (expected), retrying in %d seconds..." 
% (what_desc, sleep)) + time.sleep(sleep) + + raise TimedOutException("timed out waiting for %s: %s" % (what_desc, error)) + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT)) + delay = int(self._task.args.get('delay', self.DEFAULT_DELAY)) + sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP)) + timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT)) + + if self._play_context.check_mode: + display.vvv("wait_for_connection: skipping for check_mode") + return dict(skipped=True) + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + def ping_module_test(connect_timeout): + ''' Test ping module, if available ''' + display.vvv("wait_for_connection: attempting ping module test") + # re-run interpreter discovery if we ran it in the first iteration + if self._discovered_interpreter_key: + task_vars['ansible_facts'].pop(self._discovered_interpreter_key, None) + # call connection reset between runs if it's there + try: + self._connection.reset() + except AttributeError: + pass + + ping_result = self._execute_module(module_name='ansible.legacy.ping', module_args=dict(), task_vars=task_vars) + + # Test module output + if ping_result['ping'] != 'pong': + raise Exception('ping test failed') + + start = datetime.now() + + if delay: + time.sleep(delay) + + try: + # If the connection has a transport_test method, use it first + if hasattr(self._connection, 'transport_test'): + self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep) + + # Use the ping module test to determine end-to-end connectivity + self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test", sleep=sleep) + + except TimedOutException as e: + result['failed'] = True + result['msg'] = 
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleActionFail
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display

display = Display()

# Backend names the yum action knows how to dispatch to.
VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))


class ActionModule(ActionBase):

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        '''
        Action plugin handler for yum3 vs yum4(dnf) operations.

        Enables the yum module to use yum3 and/or yum4. Yum4 is a yum
        command-line compatibility layer on top of dnf. Since the Ansible
        modules for yum(aka yum3) and dnf(aka yum4) call each of yum3 and
        yum4's python APIs natively on the backend, we need to handle this
        here and pass off to the correct Ansible module to execute on the
        remote system.

        :param tmp: Deprecated. Do not use.
        :param task_vars: A dict of task variables.
        :return: The result dict of the delegated yum/dnf module run, or a
            failed result when no usable backend could be determined.
        '''
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Carry-over concept from the package action plugin
        if 'use' in self._task.args and 'use_backend' in self._task.args:
            raise AnsibleActionFail("parameters are mutually exclusive: ('use', 'use_backend')")

        module = self._task.args.get('use', self._task.args.get('use_backend', 'auto'))

        if module == 'auto':
            # Best-effort: try to resolve pkg_mgr from already-gathered facts.
            try:
                if self._task.delegate_to:  # if we delegate, we should use delegated host's facts
                    module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
                else:
                    module = self._templar.template("{{ansible_facts.pkg_mgr}}")
            except Exception:
                pass  # could not get it from template! fall through to fact gathering below

        if module not in VALID_BACKENDS:
            # Facts were unavailable or inconclusive; ask the target host
            # directly for its package manager.
            facts = self._execute_module(
                module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
                task_vars=task_vars)
            display.debug("Facts %s" % facts)
            module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
            if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
                # Cache the detected pkg_mgr as a fact for subsequent tasks.
                result['ansible_facts'] = {'pkg_mgr': module}

        if module not in VALID_BACKENDS:
            result.update(
                {
                    'failed': True,
                    # BUGFIX: this message was previously a 2-tuple of strings
                    # (a stray comma between the literals) with garbled "})"
                    # characters embedded in the text; emit one clean string.
                    'msg': ("Could not detect which major revision of yum is in use, "
                            "which is required to determine module backend. "
                            "You should manually specify use_backend to tell the module "
                            "whether to use the yum (yum3) or dnf (yum4) backend."),
                }
            )

        else:
            if module == "yum4":
                # yum4 is just the dnf module under a compatibility name.
                module = "dnf"

            # eliminate collisions with collections search while still allowing local override
            module = 'ansible.legacy.' + module

            if not self._shared_loader_obj.module_loader.has_plugin(module):
                result.update({'failed': True, 'msg': "Could not find a yum module backend for %s." % module})
            else:
                # run either the yum (yum3) or dnf (yum4) backend module,
                # stripping the dispatch-only args it does not accept
                new_module_args = self._task.args.copy()
                new_module_args.pop('use_backend', None)
                new_module_args.pop('use', None)

                display.vvvv("Running %s as the backend for the yum action plugin" % module)
                result.update(self._execute_module(
                    module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))

        # Cleanup
        if not self._task.async_val:
            # remove a temporary path we created
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result