From 8a754e0858d922e955e71b253c139e071ecec432 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 28 Apr 2024 18:04:21 +0200
Subject: Adding upstream version 2.14.3.

Signed-off-by: Daniel Baumann
---
 lib/ansible/executor/__init__.py                      |   20 +
 lib/ansible/executor/action_write_locks.py            |   46 +
 lib/ansible/executor/discovery/__init__.py            |    0
 lib/ansible/executor/discovery/python_target.py       |   48 +
 lib/ansible/executor/interpreter_discovery.py         |  207 +++
 lib/ansible/executor/module_common.py                 | 1428 ++++++++++++++++++++
 lib/ansible/executor/play_iterator.py                 |  652 +++++++++
 lib/ansible/executor/playbook_executor.py             |  335 +++++
 lib/ansible/executor/powershell/__init__.py           |    0
 lib/ansible/executor/powershell/async_watchdog.ps1    |  117 ++
 lib/ansible/executor/powershell/async_wrapper.ps1     |  174 +++
 lib/ansible/executor/powershell/become_wrapper.ps1    |  163 +++
 .../executor/powershell/bootstrap_wrapper.ps1         |   13 +
 .../executor/powershell/coverage_wrapper.ps1          |  199 +++
 lib/ansible/executor/powershell/exec_wrapper.ps1      |  237 ++++
 lib/ansible/executor/powershell/module_manifest.py    |  402 ++++++
 .../powershell/module_powershell_wrapper.ps1          |   75 +
 .../executor/powershell/module_script_wrapper.ps1     |   22 +
 lib/ansible/executor/powershell/module_wrapper.ps1    |  226 ++++
 lib/ansible/executor/process/__init__.py              |   20 +
 lib/ansible/executor/process/worker.py                |  226 ++++
 lib/ansible/executor/stats.py                         |  100 ++
 lib/ansible/executor/task_executor.py                 | 1239 +++++++++++++++++
 lib/ansible/executor/task_queue_manager.py            |  456 +++++++
 lib/ansible/executor/task_result.py                   |  154 +++
 25 files changed, 6559 insertions(+)
 create mode 100644 lib/ansible/executor/__init__.py
 create mode 100644 lib/ansible/executor/action_write_locks.py
 create mode 100644 lib/ansible/executor/discovery/__init__.py
 create mode 100644 lib/ansible/executor/discovery/python_target.py
 create mode 100644 lib/ansible/executor/interpreter_discovery.py
 create mode 100644 lib/ansible/executor/module_common.py
 create mode 100644 lib/ansible/executor/play_iterator.py
 create mode 100644 lib/ansible/executor/playbook_executor.py
 create mode 100644 lib/ansible/executor/powershell/__init__.py
 create mode 100644 lib/ansible/executor/powershell/async_watchdog.ps1
 create mode 100644 lib/ansible/executor/powershell/async_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/become_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/bootstrap_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/coverage_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/exec_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/module_manifest.py
 create mode 100644 lib/ansible/executor/powershell/module_powershell_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/module_script_wrapper.ps1
 create mode 100644 lib/ansible/executor/powershell/module_wrapper.ps1
 create mode 100644 lib/ansible/executor/process/__init__.py
 create mode 100644 lib/ansible/executor/process/worker.py
 create mode 100644 lib/ansible/executor/stats.py
 create mode 100644 lib/ansible/executor/task_executor.py
 create mode 100644 lib/ansible/executor/task_queue_manager.py
 create mode 100644 lib/ansible/executor/task_result.py

diff --git a/lib/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py
new file mode 100644
index 0000000..ae8ccff
--- /dev/null
+++ b/lib/ansible/executor/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/executor/action_write_locks.py b/lib/ansible/executor/action_write_locks.py
new file mode 100644
index 0000000..fd82744
--- /dev/null
+++ b/lib/ansible/executor/action_write_locks.py
@@ -0,0 +1,46 @@
+# (c) 2016 - Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing.synchronize
+
+from multiprocessing import Lock
+
+from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
+
+if 'action_write_locks' not in globals():
+    # Do not initialize this more than once because it seems to bash
+    # the existing one.  multiprocessing must be reloading the module
+    # when it forks?
+    action_write_locks = dict()  # type: dict[str | None, multiprocessing.synchronize.Lock]
+
+    # Below is a Lock for use when we weren't expecting a named module.  It gets used when an action
+    # plugin invokes a module whose name does not match with the action's name.  Slightly less
+    # efficient as all processes with unexpected module names will wait on this lock
+    action_write_locks[None] = Lock()
+
+    # These plugins are known to be called directly by action plugins with names differing from the
+    # action plugin name.  We precreate them here as an optimization.
+    # If a list of service managers is created in the future we can do the same for them.
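+    # Illustrative usage (a sketch, not part of the upstream file): callers that
+    # are about to write out a module can serialize on the per-module lock,
+    # falling back to the shared None lock for unexpected names, e.g.:
+    #
+    #   lock = action_write_locks.get(mod_name, action_write_locks[None])
+    #   with lock:
+    #       write_module_file(...)  # hypothetical helper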
+ mods = set(p['name'] for p in PKG_MGRS) + + mods.update(('copy', 'file', 'setup', 'slurp', 'stat')) + for mod_name in mods: + action_write_locks[mod_name] = Lock() diff --git a/lib/ansible/executor/discovery/__init__.py b/lib/ansible/executor/discovery/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/ansible/executor/discovery/python_target.py b/lib/ansible/executor/discovery/python_target.py new file mode 100644 index 0000000..7137733 --- /dev/null +++ b/lib/ansible/executor/discovery/python_target.py @@ -0,0 +1,48 @@ +# Copyright: (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform +# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import platform +import io +import os + + +def read_utf8_file(path, encoding='utf-8'): + if not os.access(path, os.R_OK): + return None + with io.open(path, 'r', encoding=encoding) as fd: + content = fd.read() + + return content + + +def get_platform_info(): + result = dict(platform_dist_result=[]) + + if hasattr(platform, 'dist'): + result['platform_dist_result'] = platform.dist() + + osrelease_content = read_utf8_file('/etc/os-release') + # try to fall back to /usr/lib/os-release + if not osrelease_content: + osrelease_content = read_utf8_file('/usr/lib/os-release') + + result['osrelease_content'] = osrelease_content + + return result + + +def main(): + info = get_platform_info() + + print(json.dumps(info)) + + +if __name__ == '__main__': + main() diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py new file mode 100644 index 0000000..bfd8504 --- /dev/null +++ b/lib/ansible/executor/interpreter_discovery.py @@ -0,0 +1,207 @@ +# Copyright: (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import bisect +import json +import pkgutil +import re + +from ansible import constants as C +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.distro import LinuxDistribution +from ansible.utils.display import Display +from ansible.utils.plugin_docs import get_versioned_doclink +from ansible.module_utils.compat.version import LooseVersion +from ansible.module_utils.facts.system.distribution import Distribution +from traceback import format_exc + +OS_FAMILY_LOWER = {k.lower(): v.lower() for k, v in Distribution.OS_FAMILY.items()} + +display = Display() +foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND') + + +class InterpreterDiscoveryRequiredError(Exception): + def __init__(self, message, interpreter_name, discovery_mode): + super(InterpreterDiscoveryRequiredError, self).__init__(message) + self.interpreter_name = interpreter_name + self.discovery_mode = discovery_mode + + def __str__(self): + return self.message + + def __repr__(self): + # TODO: proper repr impl + return self.message + + +def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): + # interpreter discovery is a 2-step process with the target. 
First, we use a simple shell-agnostic bootstrap to
+    # get the system type from uname, and find any random Python that can get us the info we need. For supported
+    # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
+    # and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
+    # distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
+    # default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
+
+    # FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
+    if interpreter_name != 'python':
+        raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
+
+    host = task_vars.get('inventory_hostname', 'unknown')
+    res = None
+    platform_type = 'unknown'
+    found_interpreters = [u'/usr/bin/python']  # fallback value
+    is_auto_legacy = discovery_mode.startswith('auto_legacy')
+    is_silent = discovery_mode.endswith('_silent')
+
+    try:
+        platform_python_map = C.config.get_config_value('_INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
+        bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
+
+        display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
+
+        # not all command -v impls accept a list of commands, so we have to call it once per python
+        command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
+        shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
+
+        # FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
+        res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
+
+        raw_stdout = res.get('stdout', u'')
+
+        match = foundre.match(raw_stdout)
+
+        if not match:
+            display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
+            raise ValueError('unexpected output from Python interpreter discovery')
+
+        platform_type = match.groups()[0].lower().strip()
+
+        found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
+
+        display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
+
+        if not found_interpreters:
+            if not is_silent:
+                action._discovery_warnings.append(u'No python interpreters found for '
+                                                  u'host {0} (tried {1})'.format(host, bootstrap_python_list))
+            # this is lame, but returning None or throwing an exception is uglier
+            return u'/usr/bin/python'
+
+        if platform_type != 'linux':
+            raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
+
+        platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
+
+        # FUTURE: respect pipelining setting instead of just if the connection supports it?
+        if action._connection.has_pipelining:
+            res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
+        else:
+            # FUTURE: implement on-disk case (via script action or ?)
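+            # Without pipelining there is no way to stream python_target.py to the
+            # interpreter's stdin, so extended discovery gives up here. When the probe
+            # does run, it prints one JSON document shaped like (illustrative values):
+            #   {"platform_dist_result": [], "osrelease_content": "NAME=..."}
+            # which json.loads() consumes below.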
+ raise NotImplementedError('pipelining support required for extended interpreter discovery') + + platform_info = json.loads(res.get('stdout')) + + distro, version = _get_linux_distro(platform_info) + + if not distro or not version: + raise NotImplementedError('unable to get Linux distribution/version info') + + family = OS_FAMILY_LOWER.get(distro.lower().strip()) + + version_map = platform_python_map.get(distro.lower().strip()) or platform_python_map.get(family) + if not version_map: + raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro)) + + platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict') + + # provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been) + if is_auto_legacy: + if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters: + if not is_silent: + action._discovery_warnings.append( + u"Distribution {0} {1} on host {2} should use {3}, but is using " + u"/usr/bin/python for backward compatibility with prior Ansible releases. " + u"See {4} for more information" + .format(distro, version, host, platform_interpreter, + get_versioned_doclink('reference_appendices/interpreter_discovery.html'))) + return u'/usr/bin/python' + + if platform_interpreter not in found_interpreters: + if platform_interpreter not in bootstrap_python_list: + # sanity check to make sure we looked for it + if not is_silent: + action._discovery_warnings \ + .append(u"Platform interpreter {0} on host {1} is missing from bootstrap list" + .format(platform_interpreter, host)) + + if not is_silent: + action._discovery_warnings \ + .append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the " + u"discovered platform python interpreter was not present. See {5} " + u"for more information." + .format(distro, version, host, platform_interpreter, found_interpreters[0], + get_versioned_doclink('reference_appendices/interpreter_discovery.html'))) + return found_interpreters[0] + + return platform_interpreter + except NotImplementedError as ex: + display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host) + except Exception as ex: + if not is_silent: + display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex))) + display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host) + if res and res.get('stderr'): + display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host) + + if not is_silent: + action._discovery_warnings \ + .append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of " + u"another Python interpreter could change the meaning of that path. See {3} " + u"for more information." 
+ .format(platform_type, host, found_interpreters[0], + get_versioned_doclink('reference_appendices/interpreter_discovery.html'))) + return found_interpreters[0] + + +def _get_linux_distro(platform_info): + dist_result = platform_info.get('platform_dist_result', []) + + if len(dist_result) == 3 and any(dist_result): + return dist_result[0], dist_result[1] + + osrelease_content = platform_info.get('osrelease_content') + + if not osrelease_content: + return u'', u'' + + osr = LinuxDistribution._parse_os_release_content(osrelease_content) + + return osr.get('id', u''), osr.get('version_id', u'') + + +def _version_fuzzy_match(version, version_map): + # try exact match first + res = version_map.get(version) + if res: + return res + + sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()]) + + find_looseversion = LooseVersion(version) + + # slot match; return nearest previous version we're newer than + kpos = bisect.bisect(sorted_looseversions, find_looseversion) + + if kpos == 0: + # older than everything in the list, return the oldest version + # TODO: warning-worthy? + return version_map.get(sorted_looseversions[0].vstring) + + # TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)? + + # return the next-oldest entry that we're newer than... + return version_map.get(sorted_looseversions[kpos - 1].vstring) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py new file mode 100644 index 0000000..4d06acb --- /dev/null +++ b/lib/ansible/executor/module_common.py @@ -0,0 +1,1428 @@ +# (c) 2013-2014, Michael DeHaan +# (c) 2015 Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ast +import base64 +import datetime +import json +import os +import shlex +import zipfile +import re +import pkgutil + +from ast import AST, Import, ImportFrom +from io import BytesIO + +from ansible.release import __version__, __author__ +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError +from ansible.executor.powershell import module_manifest as ps_manifest +from ansible.module_utils.common.json import AnsibleJSONEncoder +from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native +from ansible.plugins.loader import module_utils_loader +from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, _nested_dict_get + +# Must import strategy and use write_locks from there +# If we import write_locks directly then we end up binding a +# variable to the object and then it never gets updated. 
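+# That is, always reach the dict through the module, e.g.:
+#   action_write_locks.action_write_locks[mod_name]
+# rather than binding it directly at import time with
+# "from ansible.executor.action_write_locks import action_write_locks",
+# which would capture a stale reference after the module is reloaded.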
+from ansible.executor import action_write_locks
+
+from ansible.utils.display import Display
+from collections import namedtuple
+
+import importlib.util
+import importlib.machinery
+
+display = Display()
+
+ModuleUtilsProcessEntry = namedtuple('ModuleUtilsProcessEntry', ['name_parts', 'is_ambiguous', 'has_redirected_child', 'is_optional'])
+
+REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
+REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
+REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
+REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
+
+# We could end up writing out parameters with unicode characters so we need to
+# specify an encoding for the python source file
+ENCODING_STRING = u'# -*- coding: utf-8 -*-'
+b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
+
+# module_common is relative to module_utils, so fix the path
+_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
+
+# ******************************************************************************
+
+ANSIBALLZ_TEMPLATE = u'''%(shebang)s
+%(coding)s
+_ANSIBALLZ_WRAPPER = True  # For test-module.py script to tell this is an ANSIBALLZ_WRAPPER
+# This code is part of Ansible, but is an independent component.
+# The code in this particular templatable string, and this templatable string
+# only, is BSD licensed.  Modules which end up using this snippet, which is
+# dynamically combined together by Ansible still belong to the author of the
+# module, and they may assign their own license to the complete work.
+#
+# Copyright (c), James Cammarata, 2016
+# Copyright (c), Toshio Kuratomi, 2016
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+def _ansiballz_main():
+    import os
+    import os.path
+
+    # Access to the working directory is required by Python when using pipelining, as well as for the coverage module.
+    # Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
+    try:
+        os.getcwd()
+    except OSError:
+        try:
+            os.chdir(os.path.expanduser('~'))
+        except OSError:
+            os.chdir('/')
+
+%(rlimit)s
+
+    import sys
+    import __main__
+
+    # For some distros and python versions we pick up this script in the temporary
+    # directory.  This leads to problems when the ansible module masks a python
+    # library that another import needs.
We have not figured out what about the + # specific distros and python versions causes this to behave differently. + # + # Tested distros: + # Fedora23 with python3.4 Works + # Ubuntu15.10 with python2.7 Works + # Ubuntu15.10 with python3.4 Fails without this + # Ubuntu16.04.1 with python3.5 Fails without this + # To test on another platform: + # * use the copy module (since this shadows the stdlib copy module) + # * Turn off pipelining + # * Make sure that the destination file does not exist + # * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m' + # This will traceback in shutil. Looking at the complete traceback will show + # that shutil is importing copy which finds the ansible module instead of the + # stdlib module + scriptdir = None + try: + scriptdir = os.path.dirname(os.path.realpath(__main__.__file__)) + except (AttributeError, OSError): + # Some platforms don't set __file__ when reading from stdin + # OSX raises OSError if using abspath() in a directory we don't have + # permission to read (realpath calls abspath) + pass + + # Strip cwd from sys.path to avoid potential permissions issues + excludes = set(('', '.', scriptdir)) + sys.path = [p for p in sys.path if p not in excludes] + + import base64 + import runpy + import shutil + import tempfile + import zipfile + + if sys.version_info < (3,): + PY3 = False + else: + PY3 = True + + ZIPDATA = """%(zipdata)s""" + + # Note: temp_path isn't needed once we switch to zipimport + def invoke_module(modlib_path, temp_path, json_params): + # When installed via setuptools (including python setup.py install), + # ansible may be installed with an easy-install.pth file. That file + # may load the system-wide install of ansible rather than the one in + # the module. sitecustomize is the only way to override that setting. + z = zipfile.ZipFile(modlib_path, mode='a') + + # py3: modlib_path will be text, py2: it's bytes. Need bytes at the end + sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path + sitecustomize = sitecustomize.encode('utf-8') + # Use a ZipInfo to work around zipfile limitation on hosts with + # clocks set to a pre-1980 year (for instance, Raspberry Pi) + zinfo = zipfile.ZipInfo() + zinfo.filename = 'sitecustomize.py' + zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i) + z.writestr(zinfo, sitecustomize) + z.close() + + # Put the zipped up module_utils we got from the controller first in the python path so that we + # can monkeypatch the right basic + sys.path.insert(0, modlib_path) + + # Monkeypatch the parameters into basic + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = json_params +%(coverage)s + # Run the module! By importing it as '__main__', it thinks it is executing as a script + runpy.run_module(mod_name='%(module_fqn)s', init_globals=dict(_module_fqn='%(module_fqn)s', _modlib_path=modlib_path), + run_name='__main__', alter_sys=True) + + # Ansible modules must exit themselves + print('{"msg": "New-style module did not handle its own exit", "failed": true}') + sys.exit(1) + + def debug(command, zipped_mod, json_params): + # The code here normally doesn't run. It's only used for debugging on the + # remote machine. + # + # The subcommands in this function make it easier to debug ansiballz + # modules. 
Here are the basic steps:
+        #
+        # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
+        # to save the module file remotely::
+        #   $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
+        #
+        # Part of the verbose output will tell you where on the remote machine the
+        # module was written to::
+        #   [...]
+        #   SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
+        #   PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
+        #   ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
+        #   LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
+        #   [...]
+        #
+        # Login to the remote machine and run the module file from the previous
+        # step with the explode subcommand to extract the module payload into
+        # source files::
+        #   $ ssh host1
+        #   $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
+        #   Module expanded into:
+        #   /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
+        #
+        # You can now edit the source files to instrument the code or experiment with
+        # different parameter values.  When you're ready to run the code you've modified
+        # (instead of the code from the actual zipped module), use the execute subcommand like this::
+        #   $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
+
+        # Okay to use __file__ here because we're running from a kept file
+        basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
+        args_path = os.path.join(basedir, 'args')
+
+        if command == 'explode':
+            # transform the ZIPDATA into an exploded directory of code and then
+            # print the path to the code.  This is an easy way for people to look
+            # at the code on the remote machine for debugging it in that
+            # environment
+            z = zipfile.ZipFile(zipped_mod)
+            for filename in z.namelist():
+                if filename.startswith('/'):
+                    raise Exception('Something wrong with this module zip file: should not contain absolute paths')
+
+                dest_filename = os.path.join(basedir, filename)
+                if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
+                    os.makedirs(dest_filename)
+                else:
+                    directory = os.path.dirname(dest_filename)
+                    if not os.path.exists(directory):
+                        os.makedirs(directory)
+                    f = open(dest_filename, 'wb')
+                    f.write(z.read(filename))
+                    f.close()
+
+            # write the args file
+            f = open(args_path, 'wb')
+            f.write(json_params)
+            f.close()
+
+            print('Module expanded into:')
+            print('%%s' %% basedir)
+            exitcode = 0
+
+        elif command == 'execute':
+            # Execute the exploded code instead of executing the module from the
+            # embedded ZIPDATA.  This allows people to easily run their modified
+            # code on the remote machine to see how changes will affect it.
+
+            # Set pythonpath to the debug dir
+            sys.path.insert(0, basedir)
+
+            # read in the args file which the user may have modified
+            with open(args_path, 'rb') as f:
+                json_params = f.read()
+
+            # Monkeypatch the parameters into basic
+            from ansible.module_utils import basic
+            basic._ANSIBLE_ARGS = json_params
+
+            # Run the module!
By importing it as '__main__', it thinks it is executing as a script + runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True) + + # Ansible modules must exit themselves + print('{"msg": "New-style module did not handle its own exit", "failed": true}') + sys.exit(1) + + else: + print('WARNING: Unknown debug command. Doing nothing.') + exitcode = 0 + + return exitcode + + # + # See comments in the debug() method for information on debugging + # + + ANSIBALLZ_PARAMS = %(params)s + if PY3: + ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8') + try: + # There's a race condition with the controller removing the + # remote_tmpdir and this module executing under async. So we cannot + # store this in remote_tmpdir (use system tempdir instead) + # Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport + # (this helps ansible-test produce coverage stats) + temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_') + + zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip') + + with open(zipped_mod, 'wb') as modlib: + modlib.write(base64.b64decode(ZIPDATA)) + + if len(sys.argv) == 2: + exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS) + else: + # Note: temp_path isn't needed once we switch to zipimport + invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS) + finally: + try: + shutil.rmtree(temp_path) + except (NameError, OSError): + # tempdir creation probably failed + pass + sys.exit(exitcode) + +if __name__ == '__main__': + _ansiballz_main() +''' + +ANSIBALLZ_COVERAGE_TEMPLATE = ''' + os.environ['COVERAGE_FILE'] = '%(coverage_output)s=python-%%s=coverage' %% '.'.join(str(v) for v in sys.version_info[:2]) + + import atexit + + try: + import coverage + except ImportError: + print('{"msg": "Could not import `coverage` module.", "failed": true}') + sys.exit(1) + + cov = coverage.Coverage(config_file='%(coverage_config)s') + + def atexit_coverage(): + cov.stop() + cov.save() + + atexit.register(atexit_coverage) + + cov.start() +''' + +ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = ''' + try: + if PY3: + import importlib.util + if importlib.util.find_spec('coverage') is None: + raise ImportError + else: + import imp + imp.find_module('coverage') + except ImportError: + print('{"msg": "Could not find `coverage` module.", "failed": true}') + sys.exit(1) +''' + +ANSIBALLZ_RLIMIT_TEMPLATE = ''' + import resource + + existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE) + + # adjust soft limit subject to existing hard limit + requested_soft = min(existing_hard, %(rlimit_nofile)d) + + if requested_soft != existing_soft: + try: + resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard)) + except ValueError: + # some platforms (eg macOS) lie about their hard limit + pass +''' + + +def _strip_comments(source): + # Strip comments and blank lines from the wrapper + buf = [] + for line in source.splitlines(): + l = line.strip() + if not l or l.startswith(u'#'): + continue + buf.append(line) + return u'\n'.join(buf) + + +if C.DEFAULT_KEEP_REMOTE_FILES: + # Keep comments when KEEP_REMOTE_FILES is set. 
That way users will see + # the comments with some nice usage instructions + ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE +else: + # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size + ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE) + +# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py) == site-packages +# Do this instead of getting site-packages from distutils.sysconfig so we work when we +# haven't been installed +site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) +CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?Pansible/modules/.*)\.(py|ps1)$' % re.escape(site_packages)) +COLLECTION_PATH_RE = re.compile(r'/(?Pansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$') + +# Detect new-style Python modules by looking for required imports: +# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util] +# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util] +# import ansible.module_utils[.basic] +# from ansible.module_utils[ import basic] +# from ansible.module_utils[.basic import AnsibleModule] +# from ..module_utils[ import basic] +# from ..module_utils[.basic import AnsibleModule] +NEW_STYLE_PYTHON_MODULE_RE = re.compile( + # Relative imports + br'(?:from +\.{2,} *module_utils.* +import |' + # Collection absolute imports: + br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |' + br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|' + # Core absolute imports + br'from +ansible\.module_utils.* +import |' + br'import +ansible\.module_utils\.)' +) + + +class ModuleDepFinder(ast.NodeVisitor): + def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs): + """ + Walk the ast tree for the python module. + :arg module_fqn: The fully qualified name to reach this module in dotted notation. + example: ansible.module_utils.basic + :arg is_pkg_init: Inform the finder it's looking at a package init (eg __init__.py) to allow + relative import expansion to use the proper package level without having imported it locally first. + + Save submodule[.submoduleN][.identifier] into self.submodules + when they are from ansible.module_utils or ansible_collections packages + + self.submodules will end up with tuples like: + - ('ansible', 'module_utils', 'basic',) + - ('ansible', 'module_utils', 'urls', 'fetch_url') + - ('ansible', 'module_utils', 'database', 'postgres') + - ('ansible', 'module_utils', 'database', 'postgres', 'quote') + - ('ansible', 'module_utils', 'database', 'postgres', 'quote') + - ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo') + + It's up to calling code to determine whether the final element of the + tuple are module names or something else (function, class, or variable names) + .. seealso:: :python3:class:`ast.NodeVisitor` + """ + super(ModuleDepFinder, self).__init__(*args, **kwargs) + self._tree = tree # squirrel this away so we can compare node parents to it + self.submodules = set() + self.optional_imports = set() + self.module_fqn = module_fqn + self.is_pkg_init = is_pkg_init + + self._visit_map = { + Import: self.visit_Import, + ImportFrom: self.visit_ImportFrom, + } + + self.visit(tree) + + def generic_visit(self, node): + """Overridden ``generic_visit`` that makes some assumptions about our + use case, and improves performance by calling visitors directly instead + of calling ``visit`` to offload calling visitors. 
+ """ + generic_visit = self.generic_visit + visit_map = self._visit_map + for field, value in ast.iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, (Import, ImportFrom)): + item.parent = node + visit_map[item.__class__](item) + elif isinstance(item, AST): + generic_visit(item) + + visit = generic_visit + + def visit_Import(self, node): + """ + Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname] + + We save these as interesting submodules when the imported library is in ansible.module_utils + or ansible.collections + """ + for alias in node.names: + if (alias.name.startswith('ansible.module_utils.') or + alias.name.startswith('ansible_collections.')): + py_mod = tuple(alias.name.split('.')) + self.submodules.add(py_mod) + # if the import's parent is the root document, it's a required import, otherwise it's optional + if node.parent != self._tree: + self.optional_imports.add(py_mod) + self.generic_visit(node) + + def visit_ImportFrom(self, node): + """ + Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname] + + Also has to handle relative imports + + We save these as interesting submodules when the imported library is in ansible.module_utils + or ansible.collections + """ + + # FIXME: These should all get skipped: + # from ansible.executor import module_common + # from ...executor import module_common + # from ... import executor (Currently it gives a non-helpful error) + if node.level > 0: + # if we're in a package init, we have to add one to the node level (and make it none if 0 to preserve the right slicing behavior) + level_slice_offset = -node.level + 1 or None if self.is_pkg_init else -node.level + if self.module_fqn: + parts = tuple(self.module_fqn.split('.')) + if node.module: + # relative import: from .module import x + node_module = '.'.join(parts[:level_slice_offset] + (node.module,)) + else: + # relative import: from . import x + node_module = '.'.join(parts[:level_slice_offset]) + else: + # fall back to an absolute import + node_module = node.module + else: + # absolute import: from module import x + node_module = node.module + + # Specialcase: six is a special case because of its + # import logic + py_mod = None + if node.names[0].name == '_six': + self.submodules.add(('_six',)) + elif node_module.startswith('ansible.module_utils'): + # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname] + # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname] + # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname] + # from ansible.module_utils import MODULE1 [,MODULEn] [as asname] + py_mod = tuple(node_module.split('.')) + + elif node_module.startswith('ansible_collections.'): + if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module: + # from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname] + # from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname] + # FIXME: Unhandled cornercase (needs to be ignored): + # from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER + py_mod = tuple(node_module.split('.')) + else: + # Not from module_utils so ignore. 
for instance:
+                # from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
+                pass
+
+        if py_mod:
+            for alias in node.names:
+                self.submodules.add(py_mod + (alias.name,))
+                # if the import's parent is the root document, it's a required import, otherwise it's optional
+                if node.parent != self._tree:
+                    self.optional_imports.add(py_mod + (alias.name,))
+
+        self.generic_visit(node)
+
+
+def _slurp(path):
+    if not os.path.exists(path):
+        raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
+    with open(path, 'rb') as fd:
+        data = fd.read()
+    return data
+
+
+def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=False):
+    """
+    Handles the different ways ansible allows overriding the shebang target for a module.
+    """
+    # FUTURE: add logical equivalence for python3 in the case of py3-only modules
+
+    interpreter_name = os.path.basename(interpreter).strip()
+
+    # name for interpreter var
+    interpreter_config = u'ansible_%s_interpreter' % interpreter_name
+    # key for config
+    interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
+
+    interpreter_out = None
+
+    # looking for python, rest rely on matching vars
+    if interpreter_name == 'python':
+        # skip detection for network os execution, use playbook supplied one if possible
+        if remote_is_local:
+            interpreter_out = task_vars['ansible_playbook_python']
+
+        # a config def exists for this interpreter type; consult config for the value
+        elif C.config.get_configuration_definition(interpreter_config_key):
+
+            interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars)
+            interpreter_out = templar.template(interpreter_from_config.strip())
+
+            # handle interpreter discovery if requested or empty interpreter was provided
+            if not interpreter_out or interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
+
+                discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
+                facts_from_task_vars = task_vars.get('ansible_facts', {})
+
+                if discovered_interpreter_config not in facts_from_task_vars:
+                    # interpreter discovery is desired, but has not been run for this host
+                    raise InterpreterDiscoveryRequiredError("interpreter discovery needed", interpreter_name=interpreter_name, discovery_mode=interpreter_out)
+                else:
+                    interpreter_out = facts_from_task_vars[discovered_interpreter_config]
+        else:
+            raise InterpreterDiscoveryRequiredError("interpreter discovery required", interpreter_name=interpreter_name, discovery_mode='auto_legacy')
+
+    elif interpreter_config in task_vars:
+        # for non python we consult vars for a possible direct override
+        interpreter_out = templar.template(task_vars.get(interpreter_config).strip())
+
+    if not interpreter_out:
+        # nothing matched (None), or someone configured an empty string or empty interpreter
+        interpreter_out = interpreter
+
+    # set shebang
+    shebang = u'#!{0}'.format(interpreter_out)
+    if args:
+        shebang = shebang + u' ' + u' '.join(args)
+
+    return shebang, interpreter_out
+
+
+class ModuleUtilLocatorBase:
+    def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
+        self._is_ambiguous = is_ambiguous
+        # a child package redirection could cause intermediate package levels to be missing, eg
+        # from ansible.module_utils.x.y.z import foo; if x.y.z.foo is redirected, we may not have packages on disk for
+        # the intermediate packages x.y.z, so we'll need to supply empty packages for those
+        self._child_is_redirected
= child_is_redirected + self._is_optional = is_optional + self.found = False + self.redirected = False + self.fq_name_parts = fq_name_parts + self.source_code = '' + self.output_path = '' + self.is_package = False + self._collection_name = None + # for ambiguous imports, we should only test for things more than one level below module_utils + # this lets us detect erroneous imports and redirections earlier + if is_ambiguous and len(self._get_module_utils_remainder_parts(fq_name_parts)) > 1: + self.candidate_names = [fq_name_parts, fq_name_parts[:-1]] + else: + self.candidate_names = [fq_name_parts] + + @property + def candidate_names_joined(self): + return ['.'.join(n) for n in self.candidate_names] + + def _handle_redirect(self, name_parts): + module_utils_relative_parts = self._get_module_utils_remainder_parts(name_parts) + + # only allow redirects from below module_utils- if above that, bail out (eg, parent package names) + if not module_utils_relative_parts: + return False + + try: + collection_metadata = _get_collection_metadata(self._collection_name) + except ValueError as ve: # collection not found or some other error related to collection load + if self._is_optional: + return False + raise AnsibleError('error processing module_util {0} loading redirected collection {1}: {2}' + .format('.'.join(name_parts), self._collection_name, to_native(ve))) + + routing_entry = _nested_dict_get(collection_metadata, ['plugin_routing', 'module_utils', '.'.join(module_utils_relative_parts)]) + if not routing_entry: + return False + # FIXME: add deprecation warning support + + dep_or_ts = routing_entry.get('tombstone') + removed = dep_or_ts is not None + if not removed: + dep_or_ts = routing_entry.get('deprecation') + + if dep_or_ts: + removal_date = dep_or_ts.get('removal_date') + removal_version = dep_or_ts.get('removal_version') + warning_text = dep_or_ts.get('warning_text') + + msg = 'module_util {0} has been removed'.format('.'.join(name_parts)) + if warning_text: + msg += ' ({0})'.format(warning_text) + else: + msg += '.' 
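+            # display.deprecated() only warns for a plain deprecation entry, but
+            # raises when removed=True, so tombstoned module_utils stop here.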
+ + display.deprecated(msg, removal_version, removed, removal_date, self._collection_name) + if 'redirect' in routing_entry: + self.redirected = True + source_pkg = '.'.join(name_parts) + self.is_package = True # treat all redirects as packages + redirect_target_pkg = routing_entry['redirect'] + + # expand FQCN redirects + if not redirect_target_pkg.startswith('ansible_collections'): + split_fqcn = redirect_target_pkg.split('.') + if len(split_fqcn) < 3: + raise Exception('invalid redirect for {0}: {1}'.format(source_pkg, redirect_target_pkg)) + # assume it's an FQCN, expand it + redirect_target_pkg = 'ansible_collections.{0}.{1}.plugins.module_utils.{2}'.format( + split_fqcn[0], # ns + split_fqcn[1], # coll + '.'.join(split_fqcn[2:]) # sub-module_utils remainder + ) + display.vvv('redirecting module_util {0} to {1}'.format(source_pkg, redirect_target_pkg)) + self.source_code = self._generate_redirect_shim_source(source_pkg, redirect_target_pkg) + return True + return False + + def _get_module_utils_remainder_parts(self, name_parts): + # subclasses should override to return the name parts after module_utils + return [] + + def _get_module_utils_remainder(self, name_parts): + # return the remainder parts as a package string + return '.'.join(self._get_module_utils_remainder_parts(name_parts)) + + def _find_module(self, name_parts): + return False + + def _locate(self, redirect_first=True): + for candidate_name_parts in self.candidate_names: + if redirect_first and self._handle_redirect(candidate_name_parts): + break + + if self._find_module(candidate_name_parts): + break + + if not redirect_first and self._handle_redirect(candidate_name_parts): + break + + else: # didn't find what we were looking for- last chance for packages whose parents were redirected + if self._child_is_redirected: # make fake packages + self.is_package = True + self.source_code = '' + else: # nope, just bail + return + + if self.is_package: + path_parts = candidate_name_parts + ('__init__',) + else: + path_parts = candidate_name_parts + self.found = True + self.output_path = os.path.join(*path_parts) + '.py' + self.fq_name_parts = candidate_name_parts + + def _generate_redirect_shim_source(self, fq_source_module, fq_target_module): + return """ +import sys +import {1} as mod + +sys.modules['{0}'] = mod +""".format(fq_source_module, fq_target_module) + + # FIXME: add __repr__ impl + + +class LegacyModuleUtilLocator(ModuleUtilLocatorBase): + def __init__(self, fq_name_parts, is_ambiguous=False, mu_paths=None, child_is_redirected=False): + super(LegacyModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected) + + if fq_name_parts[0:2] != ('ansible', 'module_utils'): + raise Exception('this class can only locate from ansible.module_utils, got {0}'.format(fq_name_parts)) + + if fq_name_parts[2] == 'six': + # FIXME: handle the ansible.module_utils.six._six case with a redirect or an internal _six attr on six itself? 
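+            # e.g. an import recorded as ('ansible', 'module_utils', 'six', 'moves')
+            # is collapsed to plain ('ansible', 'module_utils', 'six') by the lines below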
+ # six creates its submodules at runtime; convert all these to just 'ansible.module_utils.six' + fq_name_parts = ('ansible', 'module_utils', 'six') + self.candidate_names = [fq_name_parts] + + self._mu_paths = mu_paths + self._collection_name = 'ansible.builtin' # legacy module utils always look in ansible.builtin for redirects + self._locate(redirect_first=False) # let local stuff override redirects for legacy + + def _get_module_utils_remainder_parts(self, name_parts): + return name_parts[2:] # eg, foo.bar for ansible.module_utils.foo.bar + + def _find_module(self, name_parts): + rel_name_parts = self._get_module_utils_remainder_parts(name_parts) + + # no redirection; try to find the module + if len(rel_name_parts) == 1: # direct child of module_utils, just search the top-level dirs we were given + paths = self._mu_paths + else: # a nested submodule of module_utils, extend the paths given with the intermediate package names + paths = [os.path.join(p, *rel_name_parts[:-1]) for p in + self._mu_paths] # extend the MU paths with the relative bit + + # find_spec needs the full module name + self._info = info = importlib.machinery.PathFinder.find_spec('.'.join(name_parts), paths) + if info is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES: + self.is_package = info.origin.endswith('/__init__.py') + path = info.origin + else: + return False + self.source_code = _slurp(path) + + return True + + +class CollectionModuleUtilLocator(ModuleUtilLocatorBase): + def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False): + super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected, is_optional) + + if fq_name_parts[0] != 'ansible_collections': + raise Exception('CollectionModuleUtilLocator can only locate from ansible_collections, got {0}'.format(fq_name_parts)) + elif len(fq_name_parts) >= 6 and fq_name_parts[3:5] != ('plugins', 'module_utils'): + raise Exception('CollectionModuleUtilLocator can only locate below ansible_collections.(ns).(coll).plugins.module_utils, got {0}' + .format(fq_name_parts)) + + self._collection_name = '.'.join(fq_name_parts[1:3]) + + self._locate() + + def _find_module(self, name_parts): + # synthesize empty inits for packages down through module_utils- we don't want to allow those to be shipped over, but the + # package hierarchy needs to exist + if len(name_parts) < 6: + self.source_code = '' + self.is_package = True + return True + + # NB: we can't use pkgutil.get_data safely here, since we don't want to import/execute package/module code on + # the controller while analyzing/assembling the module, so we'll have to manually import the collection's + # Python package to locate it (import root collection, reassemble resource path beneath, fetch source) + + collection_pkg_name = '.'.join(name_parts[0:3]) + resource_base_path = os.path.join(*name_parts[3:]) + + src = None + # look for package_dir first, then module + try: + src = pkgutil.get_data(collection_pkg_name, to_native(os.path.join(resource_base_path, '__init__.py'))) + except ImportError: + pass + + # TODO: we might want to synthesize fake inits for py3-style packages, for now they're required beneath module_utils + + if src is not None: # empty string is OK + self.is_package = True + else: + try: + src = pkgutil.get_data(collection_pkg_name, to_native(resource_base_path + '.py')) + except ImportError: + pass + + if src is None: # empty string is OK + return False + + self.source_code = src + return 
True + + def _get_module_utils_remainder_parts(self, name_parts): + return name_parts[5:] # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar + + +def recursive_finder(name, module_fqn, module_data, zf): + """ + Using ModuleDepFinder, make sure we have all of the module_utils files that + the module and its module_utils files needs. (no longer actually recursive) + :arg name: Name of the python module we're examining + :arg module_fqn: Fully qualified name of the python module we're scanning + :arg module_data: string Python code of the module we're scanning + :arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload + which we're assembling + """ + + # py_module_cache maps python module names to a tuple of the code in the module + # and the pathname to the module. + # Here we pre-load it with modules which we create without bothering to + # read from actual files (In some cases, these need to differ from what ansible + # ships because they're namespace packages in the module) + # FIXME: do we actually want ns pkg behavior for these? Seems like they should just be forced to emptyish pkg stubs + py_module_cache = { + ('ansible',): ( + b'from pkgutil import extend_path\n' + b'__path__=extend_path(__path__,__name__)\n' + b'__version__="' + to_bytes(__version__) + + b'"\n__author__="' + to_bytes(__author__) + b'"\n', + 'ansible/__init__.py'), + ('ansible', 'module_utils'): ( + b'from pkgutil import extend_path\n' + b'__path__=extend_path(__path__,__name__)\n', + 'ansible/module_utils/__init__.py')} + + module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)] + module_utils_paths.append(_MODULE_UTILS_PATH) + + # Parse the module code and find the imports of ansible.module_utils + try: + tree = compile(module_data, '', 'exec', ast.PyCF_ONLY_AST) + except (SyntaxError, IndentationError) as e: + raise AnsibleError("Unable to import %s due to %s" % (name, e.msg)) + + finder = ModuleDepFinder(module_fqn, tree) + + # the format of this set is a tuple of the module name and whether or not the import is ambiguous as a module name + # or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?) 
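+    # e.g. 'from ansible.module_utils.six import moves' arrives here as
+    # ('ansible', 'module_utils', 'six', 'moves'); 'moves' could be a submodule
+    # or an attribute, so the entry is flagged ambiguous and the locator tries
+    # both candidate names.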
+ modules_to_process = [ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules] + + # HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input + modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False, is_optional=False)) + + # we'll be adding new modules inline as we discover them, so just keep going til we've processed them all + while modules_to_process: + modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order + py_module_name, is_ambiguous, child_is_redirected, is_optional = modules_to_process.pop(0) + + if py_module_name in py_module_cache: + # this is normal; we'll often see the same module imported many times, but we only need to process it once + continue + + if py_module_name[0:2] == ('ansible', 'module_utils'): + module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous, + mu_paths=module_utils_paths, child_is_redirected=child_is_redirected) + elif py_module_name[0] == 'ansible_collections': + module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous, + child_is_redirected=child_is_redirected, is_optional=is_optional) + else: + # FIXME: dot-joined result + display.warning('ModuleDepFinder improperly found a non-module_utils import %s' + % [py_module_name]) + continue + + # Could not find the module. Construct a helpful error message. + if not module_info.found: + if is_optional: + # this was a best-effort optional import that we couldn't find, oh well, move along... + continue + # FIXME: use dot-joined candidate names + msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined) + raise AnsibleError(msg) + + # check the cache one more time with the module we actually found, since the name could be different than the input + # eg, imported name vs module + if module_info.fq_name_parts in py_module_cache: + continue + + # compile the source, process all relevant imported modules + try: + tree = compile(module_info.source_code, '', 'exec', ast.PyCF_ONLY_AST) + except (SyntaxError, IndentationError) as e: + raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg)) + + finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package) + modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) + for m in finder.submodules if m not in py_module_cache) + + # we've processed this item, add it to the output list + py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path) + + # ensure we process all ancestor package inits + accumulated_pkg_name = [] + for pkg in module_info.fq_name_parts[:-1]: + accumulated_pkg_name.append(pkg) # we're accumulating this across iterations + normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not) + if normalized_name not in py_module_cache: + modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=is_optional)) + + for py_module_name in py_module_cache: + py_module_file_name = py_module_cache[py_module_name][1] + + zf.writestr(py_module_file_name, py_module_cache[py_module_name][0]) + mu_file = to_text(py_module_file_name, errors='surrogate_or_strict') + display.vvvvv("Including 
module_utils file %s" % mu_file) + + +def _is_binary(b_module_data): + textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f])) + start = b_module_data[:1024] + return bool(start.translate(None, textchars)) + + +def _get_ansible_module_fqn(module_path): + """ + Get the fully qualified name for an ansible module based on its pathname + + remote_module_fqn is the fully qualified name. Like ansible.modules.system.ping + Or ansible_collections.Namespace.Collection_name.plugins.modules.ping + .. warning:: This function is for ansible modules only. It won't work for other things + (non-module plugins, etc) + """ + remote_module_fqn = None + + # Is this a core module? + match = CORE_LIBRARY_PATH_RE.search(module_path) + if not match: + # Is this a module in a collection? + match = COLLECTION_PATH_RE.search(module_path) + + # We can tell the FQN for core modules and collection modules + if match: + path = match.group('path') + if '.' in path: + # FQNs must be valid as python identifiers. This sanity check has failed. + # we could check other things as well + raise ValueError('Module name (or path) was not a valid python identifier') + + remote_module_fqn = '.'.join(path.split('/')) + else: + # Currently we do not handle modules in roles so we can end up here for that reason + raise ValueError("Unable to determine module's fully qualified name") + + return remote_module_fqn + + +def _add_module_to_zip(zf, remote_module_fqn, b_module_data): + """Add a module from ansible or from an ansible collection into the module zip""" + module_path_parts = remote_module_fqn.split('.') + + # Write the module + module_path = '/'.join(module_path_parts) + '.py' + zf.writestr(module_path, b_module_data) + + # Write the __init__.py's necessary to get there + if module_path_parts[0] == 'ansible': + # The ansible namespace is set up as part of the module_utils setup... + start = 2 + existing_paths = frozenset() + else: + # ... but ansible_collections and other toplevels are not + start = 1 + existing_paths = frozenset(zf.namelist()) + + for idx in range(start, len(module_path_parts)): + package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py' + # If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder. + if package_path in existing_paths: + continue + # Note: We don't want to include more than one ansible module in a payload at this time + # so no need to fill the __init__.py with namespace code + zf.writestr(package_path, b'') + + +def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become, + become_method, become_user, become_password, become_flags, environment, remote_is_local=False): + """ + Given the source of the module, convert it to a Jinja2 template to insert + module code and return whether it's a new or old style module. + """ + module_substyle = module_style = 'old' + + # module_style is something important to calling code (ActionBase). It + # determines how arguments are formatted (json vs k=v) and whether + # a separate arguments file needs to be sent over the wire. + # module_substyle is extra information that's useful internally. It tells + # us what we have to look for to substitute in the module files and whether + # we're using module replacer or ansiballz to format the module itself.
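+ # Rough map of the detection chain below, as (module_style, module_substyle): binary data + # -> ('binary', 'binary'); REPLACER or a new-style Python import -> ('new', 'python'), i.e. AnsiballZ; + # REPLACER_WINDOWS or a #Requires/#AnsibleRequires line -> ('new', 'powershell'); + # REPLACER_JSONARGS -> ('new', 'jsonargs'); a WANT_JSON marker -> ('non_native_want_json', + # 'non_native_want_json'); anything else stays ('old', 'old').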
+ if _is_binary(b_module_data): + module_substyle = module_style = 'binary' + elif REPLACER in b_module_data: + # Do REPLACER before from ansible.module_utils because we need to make sure + # we substitute "from ansible.module_utils.basic import *" for REPLACER + module_style = 'new' + module_substyle = 'python' + b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *') + elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data): + module_style = 'new' + module_substyle = 'python' + elif REPLACER_WINDOWS in b_module_data: + module_style = 'new' + module_substyle = 'powershell' + b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy') + elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \ + or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\ + or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \ + or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \ + or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE): + module_style = 'new' + module_substyle = 'powershell' + elif REPLACER_JSONARGS in b_module_data: + module_style = 'new' + module_substyle = 'jsonargs' + elif b'WANT_JSON' in b_module_data: + module_substyle = module_style = 'non_native_want_json' + + shebang = None + # Neither old-style, non_native_want_json nor binary modules should be modified + # except for the shebang line (Done by modify_module) + if module_style in ('old', 'non_native_want_json', 'binary'): + return b_module_data, module_style, shebang + + output = BytesIO() + + try: + remote_module_fqn = _get_ansible_module_fqn(module_path) + except ValueError: + # Modules in roles currently are not found by the fqn heuristic so we + # fall back to this. This means that relative imports inside a module from + # a role may fail. Absolute imports should be used for future-proofing. + # People should start writing collections instead of modules in roles so we + # may never fix this + display.debug('ANSIBALLZ: Could not determine module FQN') + remote_module_fqn = 'ansible.modules.%s' % module_name + + if module_substyle == 'python': + params = dict(ANSIBLE_MODULE_ARGS=module_args,) + try: + python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True)) + except TypeError as e: + raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e)) + + try: + compression_method = getattr(zipfile, module_compression) + except AttributeError: + display.warning(u'Bad module compression string specified: %s. 
Using ZIP_STORED (no compression)' % module_compression) + compression_method = zipfile.ZIP_STORED + + lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache') + cached_module_filename = os.path.join(lookup_path, "%s-%s" % (remote_module_fqn, module_compression)) + + zipdata = None + # Optimization -- don't lock if the module has already been cached + if os.path.exists(cached_module_filename): + display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename) + with open(cached_module_filename, 'rb') as module_data: + zipdata = module_data.read() + else: + if module_name in action_write_locks.action_write_locks: + display.debug('ANSIBALLZ: Using lock for %s' % module_name) + lock = action_write_locks.action_write_locks[module_name] + else: + # If the action plugin directly invokes the module (instead of + # going through a strategy) then we don't have a cross-process + # Lock specifically for this module. Use the "unexpected + # module" lock instead + display.debug('ANSIBALLZ: Using generic lock for %s' % module_name) + lock = action_write_locks.action_write_locks[None] + + display.debug('ANSIBALLZ: Acquiring lock') + with lock: + display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock)) + # Check that no other process has created this while we were + # waiting for the lock + if not os.path.exists(cached_module_filename): + display.debug('ANSIBALLZ: Creating module') + # Create the module zip data + zipoutput = BytesIO() + zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method) + + # walk the module imports, looking for module_utils to send- they'll be added to the zipfile + recursive_finder(module_name, remote_module_fqn, b_module_data, zf) + + display.debug('ANSIBALLZ: Writing module into payload') + _add_module_to_zip(zf, remote_module_fqn, b_module_data) + + zf.close() + zipdata = base64.b64encode(zipoutput.getvalue()) + + # Write the assembled module to a temp file (write to temp + # so that no one looking for the file reads a partially + # written file) + # + # FIXME: Once split controller/remote is merged, this can be simplified to + # os.makedirs(lookup_path, exist_ok=True) + if not os.path.exists(lookup_path): + try: + # Note -- if we have a global function to setup, that would + # be a better place to run this + os.makedirs(lookup_path) + except OSError: + # Multiple processes tried to create the directory. If it still does not + # exist, raise the original exception. + if not os.path.exists(lookup_path): + raise + display.debug('ANSIBALLZ: Writing module') + with open(cached_module_filename + '-part', 'wb') as f: + f.write(zipdata) + + # Rename the file into its final position in the cache so + # future users of this module can read it off the + # filesystem instead of constructing from scratch. + display.debug('ANSIBALLZ: Renaming module') + os.rename(cached_module_filename + '-part', cached_module_filename) + display.debug('ANSIBALLZ: Done creating module') + + if zipdata is None: + display.debug('ANSIBALLZ: Reading module after lock') + # Another process wrote the file while we were waiting for + # the write lock. Go ahead and read the data from disk + # instead of re-creating it. + try: + with open(cached_module_filename, 'rb') as f: + zipdata = f.read() + except IOError: + raise AnsibleError('A different worker process failed to create module file. 
' + 'Look at traceback for that process for debugging information.') + zipdata = to_text(zipdata, errors='surrogate_or_strict') + + o_interpreter, o_args = _extract_interpreter(b_module_data) + if o_interpreter is None: + o_interpreter = u'/usr/bin/python' + + shebang, interpreter = _get_shebang(o_interpreter, task_vars, templar, o_args, remote_is_local=remote_is_local) + + # FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source + rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars) + + if not isinstance(rlimit_nofile, int): + rlimit_nofile = int(templar.template(rlimit_nofile)) + + if rlimit_nofile: + rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict( + rlimit_nofile=rlimit_nofile, + ) + else: + rlimit = '' + + coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG') + + if coverage_config: + coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT'] + + if coverage_output: + # Enable code coverage analysis of the module. + # This feature is for internal testing and may change without notice. + coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict( + coverage_config=coverage_config, + coverage_output=coverage_output, + ) + else: + # Verify coverage is available without importing it. + # This will detect when a module would fail with coverage enabled with minimal overhead. + coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE + else: + coverage = '' + + now = datetime.datetime.utcnow() + output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict( + zipdata=zipdata, + ansible_module=module_name, + module_fqn=remote_module_fqn, + params=python_repred_params, + shebang=shebang, + coding=ENCODING_STRING, + year=now.year, + month=now.month, + day=now.day, + hour=now.hour, + minute=now.minute, + second=now.second, + coverage=coverage, + rlimit=rlimit, + ))) + b_module_data = output.getvalue() + + elif module_substyle == 'powershell': + # Powershell/winrm don't actually make use of shebang so we can + # safely set this here. If we let the fallback code handle this + # it can fail in the presence of the UTF8 BOM commonly added by + # Windows text editors + shebang = u'#!powershell' + # create the common exec wrapper payload and set that as the module_data + # bytes + b_module_data = ps_manifest._create_powershell_wrapper( + b_module_data, module_path, module_args, environment, + async_timeout, become, become_method, become_user, become_password, + become_flags, module_substyle, task_vars, remote_module_fqn + ) + + elif module_substyle == 'jsonargs': + module_args_json = to_bytes(json.dumps(module_args, cls=AnsibleJSONEncoder, vault_to_text=True)) + + # these strings could be included in a third-party module but + # officially they were included in the 'basic' snippet for new-style + # python modules (which has been replaced with something else in + # ansiballz) If we remove them from jsonargs-style module replacer + # then we can remove them everywhere. + python_repred_args = to_bytes(repr(module_args_json)) + b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__))) + b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args) + b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS))) + + # The main event -- substitute the JSON args string into the module + b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json) + + facility = b'syslog.' 
+ to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict') + b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility) + + return (b_module_data, module_style, shebang) + + +def _extract_interpreter(b_module_data): + """ + Used to extract the shebang expression from the module's byte data and return the + interpreter path and a list of its arguments as text, or (None, []) if no shebang is detected. + """ + + interpreter = None + args = [] + b_lines = b_module_data.split(b"\n", 1) + if b_lines[0].startswith(b"#!"): + b_shebang = b_lines[0].strip() + + # shlex.split needs text on Python 3 + cli_split = shlex.split(to_text(b_shebang[2:], errors='surrogate_or_strict')) + + # convert args to text + cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split] + interpreter = cli_split[0] + args = cli_split[1:] + + return interpreter, args + + +def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False, + become_method=None, become_user=None, become_password=None, become_flags=None, environment=None, remote_is_local=False): + """ + Used to insert chunks of code into modules before transfer rather than + doing regular python imports. This allows for more efficient transfer in + a non-bootstrapping scenario by not moving extra files over the wire and + also takes care of embedding arguments in the transferred modules. + + This version is done in such a way that local imports can still be + used in the module code, so IDEs don't have to be aware of what is going on. + + Example: + + from ansible.module_utils.basic import * + + ... will result in the insertion of basic.py into the module + from the module_utils/ directory in the source tree. + + For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of + properties not available here. + + """ + task_vars = {} if task_vars is None else task_vars + environment = {} if environment is None else environment + + with open(module_path, 'rb') as f: + + # read in the module source + b_module_data = f.read() + + (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, + async_timeout=async_timeout, become=become, become_method=become_method, + become_user=become_user, become_password=become_password, become_flags=become_flags, + environment=environment, remote_is_local=remote_is_local) + + if module_style == 'binary': + return (b_module_data, module_style, to_text(shebang, nonstring='passthru')) + elif shebang is None: + interpreter, args = _extract_interpreter(b_module_data) + # No interpreter/shebang, assume a binary module? 
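+ # For example, a module starting with "#!/usr/bin/python -u" yields interpreter + # '/usr/bin/python' and args ['-u'] from _extract_interpreter(); _get_shebang() below + # may then substitute the configured interpreter into that first line.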
+ if interpreter is not None: + + shebang, new_interpreter = _get_shebang(interpreter, task_vars, templar, args, remote_is_local=remote_is_local) + + # update shebang + b_lines = b_module_data.split(b"\n", 1) + + if interpreter != new_interpreter: + b_lines[0] = to_bytes(shebang, errors='surrogate_or_strict', nonstring='passthru') + + if os.path.basename(interpreter).startswith(u'python'): + b_lines.insert(1, b_ENCODING_STRING) + + b_module_data = b"\n".join(b_lines) + + return (b_module_data, module_style, shebang) + + +def get_action_args_with_defaults(action, args, defaults, templar, redirected_names=None, action_groups=None): + if redirected_names: + resolved_action_name = redirected_names[-1] + else: + resolved_action_name = action + + if redirected_names is not None: + msg = ( + "Finding module_defaults for the action %s. " + "The caller passed a list of redirected action names, which is deprecated. " + "The task's resolved action should be provided as the first argument instead." + ) + display.deprecated(msg % resolved_action_name, version='2.16') + + # Get the list of groups that contain this action + if action_groups is None: + msg = ( + "Finding module_defaults for action %s. " + "The caller has not passed the action_groups, so any " + "that may include this action will be ignored." + ) + display.warning(msg % resolved_action_name) + group_names = [] + else: + group_names = action_groups.get(resolved_action_name, []) + + tmp_args = {} + module_defaults = {} + + # Merge latest defaults into dict, since they are a list of dicts + if isinstance(defaults, list): + for default in defaults: + module_defaults.update(default) + + # module_defaults keys are static, but the values may be templated + module_defaults = templar.template(module_defaults) + for default in module_defaults: + if default.startswith('group/'): + group_name = default.split('group/')[-1] + if group_name in group_names: + tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy()) + + # handle specific action defaults + tmp_args.update(module_defaults.get(resolved_action_name, {}).copy()) + + # direct args override all + tmp_args.update(args) + + return tmp_args diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py new file mode 100644 index 0000000..2449782 --- /dev/null +++ b/lib/ansible/executor/play_iterator.py @@ -0,0 +1,652 @@ +# (c) 2012-2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
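+ +# Broadly, PlayIterator steps each host independently through the play's compiled +# blocks; per-host progress (current block/task indices, run_state, fail_state) is +# kept in HostState objects, and get_next_task_for_host() advances that state one +# task at a time.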
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import fnmatch + +from enum import IntEnum, IntFlag + +from ansible import constants as C +from ansible.errors import AnsibleAssertionError +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.playbook.block import Block +from ansible.playbook.task import Task +from ansible.utils.display import Display + + +display = Display() + + +__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates'] + + +class IteratingStates(IntEnum): + SETUP = 0 + TASKS = 1 + RESCUE = 2 + ALWAYS = 3 + HANDLERS = 4 + COMPLETE = 5 + + +class FailedStates(IntFlag): + NONE = 0 + SETUP = 1 + TASKS = 2 + RESCUE = 4 + ALWAYS = 8 + HANDLERS = 16 + + +class HostState: + def __init__(self, blocks): + self._blocks = blocks[:] + self.handlers = [] + + self.cur_block = 0 + self.cur_regular_task = 0 + self.cur_rescue_task = 0 + self.cur_always_task = 0 + self.cur_handlers_task = 0 + self.run_state = IteratingStates.SETUP + self.fail_state = FailedStates.NONE + self.pre_flushing_run_state = None + self.update_handlers = True + self.pending_setup = False + self.tasks_child_state = None + self.rescue_child_state = None + self.always_child_state = None + self.did_rescue = False + self.did_start_at_task = False + + def __repr__(self): + return "HostState(%r)" % self._blocks + + def __str__(self): + return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, " + "pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, " + "tasks child state? (%s), rescue child state? (%s), always child state? (%s), " + "did rescue? %s, did start at task? %s" % ( + self.cur_block, + self.cur_regular_task, + self.cur_rescue_task, + self.cur_always_task, + self.cur_handlers_task, + self.run_state, + self.fail_state, + self.pre_flushing_run_state, + self.update_handlers, + self.pending_setup, + self.tasks_child_state, + self.rescue_child_state, + self.always_child_state, + self.did_rescue, + self.did_start_at_task, + )) + + def __eq__(self, other): + if not isinstance(other, HostState): + return False + + for attr in ('_blocks', + 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task', + 'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup', + 'tasks_child_state', 'rescue_child_state', 'always_child_state'): + if getattr(self, attr) != getattr(other, attr): + return False + + return True + + def get_current_block(self): + return self._blocks[self.cur_block] + + def copy(self): + new_state = HostState(self._blocks) + new_state.handlers = self.handlers[:] + new_state.cur_block = self.cur_block + new_state.cur_regular_task = self.cur_regular_task + new_state.cur_rescue_task = self.cur_rescue_task + new_state.cur_always_task = self.cur_always_task + new_state.cur_handlers_task = self.cur_handlers_task + new_state.run_state = self.run_state + new_state.fail_state = self.fail_state + new_state.pre_flushing_run_state = self.pre_flushing_run_state + new_state.update_handlers = self.update_handlers + new_state.pending_setup = self.pending_setup + new_state.did_rescue = self.did_rescue + new_state.did_start_at_task = self.did_start_at_task + if self.tasks_child_state is not None: + new_state.tasks_child_state = self.tasks_child_state.copy() + if self.rescue_child_state is not None: + new_state.rescue_child_state = self.rescue_child_state.copy() + if self.always_child_state is not None: + 
new_state.always_child_state = self.always_child_state.copy() + return new_state + + +class PlayIterator: + + def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False): + self._play = play + self._blocks = [] + self._variable_manager = variable_manager + + setup_block = Block(play=self._play) + # Gathering facts with run_once would copy the facts from one host to + # the others. + setup_block.run_once = False + setup_task = Task(block=setup_block) + setup_task.action = 'gather_facts' + # TODO: hardcoded resolution here, but should use actual resolution code in the end, + # in case of 'legacy' mismatch + setup_task.resolved_action = 'ansible.builtin.gather_facts' + setup_task.name = 'Gathering Facts' + setup_task.args = {} + + # Unless play is specifically tagged, gathering should 'always' run + if not self._play.tags: + setup_task.tags = ['always'] + + # Default options to gather + for option in ('gather_subset', 'gather_timeout', 'fact_path'): + value = getattr(self._play, option, None) + if value is not None: + setup_task.args[option] = value + + setup_task.set_loader(self._play._loader) + # short circuit fact gathering if the entire playbook is conditional + if self._play._included_conditional is not None: + setup_task.when = self._play._included_conditional[:] + setup_block.block = [setup_task] + + setup_block = setup_block.filter_tagged_tasks(all_vars) + self._blocks.append(setup_block) + + # keep a flattened (no blocks) list of all tasks from the play, + # used for the lockstep mechanism in the linear strategy + self.all_tasks = setup_block.get_tasks() + + for block in self._play.compile(): + new_block = block.filter_tagged_tasks(all_vars) + if new_block.has_tasks(): + self._blocks.append(new_block) + self.all_tasks.extend(new_block.get_tasks()) + + # keep a list of all handlers; it is copied into each HostState + # at the beginning of IteratingStates.HANDLERS + # the copy happens at each flush in order to restore the original + # list and remove any included handlers that might not be notified + # at the particular flush + self.handlers = [h for b in self._play.handlers for h in b.block] + + self._host_states = {} + start_at_matched = False + batch = inventory.get_hosts(self._play.hosts, order=self._play.order) + self.batch_size = len(batch) + for host in batch: + self.set_state_for_host(host.name, HostState(blocks=self._blocks)) + # if we're looking to start at a specific task, iterate through + # the tasks for this host until we find the specified task + if play_context.start_at_task is not None and not start_at_done: + while True: + (s, task) = self.get_next_task_for_host(host, peek=True) + if s.run_state == IteratingStates.COMPLETE: + break + if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \ + task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task): + start_at_matched = True + break + self.set_state_for_host(host.name, s) + + # finally, reset the host's state to IteratingStates.SETUP + if start_at_matched: + self._host_states[host.name].did_start_at_task = True + self._host_states[host.name].run_state = IteratingStates.SETUP + + if start_at_matched: + # we have our match, so clear the start_at_task field on the + # play context to flag that we've started at a task (and future + # plays won't try to advance) + play_context.start_at_task = None + + self.end_play = False + self.cur_task = 0 + + def get_host_state(self, 
host): + # Since we're using the PlayIterator to carry forward failed hosts, + # in the event that a previous host was not in the current inventory + # we create a stub state for it now + if host.name not in self._host_states: + self.set_state_for_host(host.name, HostState(blocks=[])) + + return self._host_states[host.name].copy() + + def cache_block_tasks(self, block): + display.deprecated( + 'PlayIterator.cache_block_tasks is now noop due to the changes ' + 'in the way tasks are cached and is deprecated.', + version='2.16' + ) + + def get_next_task_for_host(self, host, peek=False): + + display.debug("getting the next task for host %s" % host.name) + s = self.get_host_state(host) + + task = None + if s.run_state == IteratingStates.COMPLETE: + display.debug("host %s is done iterating, returning" % host.name) + return (s, None) + + (s, task) = self._get_next_task_from_state(s, host=host) + + if not peek: + self.set_state_for_host(host.name, s) + + display.debug("done getting next task for host %s" % host.name) + display.debug(" ^ task is: %s" % task) + display.debug(" ^ state is: %s" % s) + return (s, task) + + def _get_next_task_from_state(self, state, host): + + task = None + + # try and find the next task, given the current state. + while True: + # try to get the current block from the list of blocks, and + # if we run past the end of the list we know we're done with + # this block + try: + block = state._blocks[state.cur_block] + except IndexError: + state.run_state = IteratingStates.COMPLETE + return (state, None) + + if state.run_state == IteratingStates.SETUP: + # First, we check to see if we were pending setup. If not, this is + # the first trip through IteratingStates.SETUP, so we set the pending_setup + # flag and try to determine if we do in fact want to gather facts for + # the specified host. + if not state.pending_setup: + state.pending_setup = True + + # Gather facts if the default is 'smart' and we have not yet + # done it for this host; or if 'explicit' and the play sets + # gather_facts to True; or if 'implicit' and the play does + # NOT explicitly set gather_facts to False. + + gathering = C.DEFAULT_GATHERING + implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False) + + if (gathering == 'implicit' and implied) or \ + (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \ + (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))): + # The setup block is always self._blocks[0], as we inject it + # during the play compilation in __init__ above. 
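+ # (If filter_tagged_tasks() stripped the gather_facts task in __init__, the setup + # block may be empty, hence the has_tasks()/len() guard below.)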
+ setup_block = self._blocks[0] + if setup_block.has_tasks() and len(setup_block.block) > 0: + task = setup_block.block[0] + else: + # This is the second trip through IteratingStates.SETUP, so we clear + # the flag and move onto the next block in the list while setting + # the run state to IteratingStates.TASKS + state.pending_setup = False + + state.run_state = IteratingStates.TASKS + if not state.did_start_at_task: + state.cur_block += 1 + state.cur_regular_task = 0 + state.cur_rescue_task = 0 + state.cur_always_task = 0 + state.tasks_child_state = None + state.rescue_child_state = None + state.always_child_state = None + + elif state.run_state == IteratingStates.TASKS: + # clear the pending setup flag, since we're past that and it didn't fail + if state.pending_setup: + state.pending_setup = False + + # First, we check for a child task state that is not failed, and if we + # have one recurse into it for the next task. If we're done with the child + # state, we clear it and drop back to getting the next task from the list. + if state.tasks_child_state: + (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host) + if self._check_failed_state(state.tasks_child_state): + # failed child state, so clear it and move into the rescue portion + state.tasks_child_state = None + self._set_failed_state(state) + else: + # get the next task recursively + if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE: + # we're done with the child state, so clear it and continue + # back to the top of the loop to get the next task + state.tasks_child_state = None + continue + else: + # First here, we check to see if we've failed anywhere down the chain + # of states we have, and if so we move onto the rescue portion. Otherwise, + # we check to see if we've moved past the end of the list of tasks. If so, + # we move into the always portion of the block, otherwise we get the next + # task from the list. + if self._check_failed_state(state): + state.run_state = IteratingStates.RESCUE + elif state.cur_regular_task >= len(block.block): + state.run_state = IteratingStates.ALWAYS + else: + task = block.block[state.cur_regular_task] + # if the current task is actually a child block, create a child + # state for us to recurse into on the next pass + if isinstance(task, Block): + state.tasks_child_state = HostState(blocks=[task]) + state.tasks_child_state.run_state = IteratingStates.TASKS + # since we've created the child state, clear the task + # so we can pick up the child state on the next pass + task = None + state.cur_regular_task += 1 + + elif state.run_state == IteratingStates.RESCUE: + # The process here is identical to IteratingStates.TASKS, except instead + # we move into the always portion of the block. 
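+ # Note that once the rescue list is exhausted with at least one rescue task having + # existed, fail_state is reset to NONE and did_rescue is set -- this is how a + # rescue: section absorbs a failure instead of failing the host.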
+ if state.rescue_child_state: + (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host) + if self._check_failed_state(state.rescue_child_state): + state.rescue_child_state = None + self._set_failed_state(state) + else: + if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE: + state.rescue_child_state = None + continue + else: + if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE: + state.run_state = IteratingStates.ALWAYS + elif state.cur_rescue_task >= len(block.rescue): + if len(block.rescue) > 0: + state.fail_state = FailedStates.NONE + state.run_state = IteratingStates.ALWAYS + state.did_rescue = True + else: + task = block.rescue[state.cur_rescue_task] + if isinstance(task, Block): + state.rescue_child_state = HostState(blocks=[task]) + state.rescue_child_state.run_state = IteratingStates.TASKS + task = None + state.cur_rescue_task += 1 + + elif state.run_state == IteratingStates.ALWAYS: + # And again, the process here is identical to IteratingStates.TASKS, except + # instead we either move onto the next block in the list, or we set the + # run state to IteratingStates.COMPLETE in the event of any errors, or when we + # have hit the end of the list of blocks. + if state.always_child_state: + (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host) + if self._check_failed_state(state.always_child_state): + state.always_child_state = None + self._set_failed_state(state) + else: + if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE: + state.always_child_state = None + continue + else: + if state.cur_always_task >= len(block.always): + if state.fail_state != FailedStates.NONE: + state.run_state = IteratingStates.COMPLETE + else: + state.cur_block += 1 + state.cur_regular_task = 0 + state.cur_rescue_task = 0 + state.cur_always_task = 0 + state.run_state = IteratingStates.TASKS + state.tasks_child_state = None + state.rescue_child_state = None + state.always_child_state = None + state.did_rescue = False + else: + task = block.always[state.cur_always_task] + if isinstance(task, Block): + state.always_child_state = HostState(blocks=[task]) + state.always_child_state.run_state = IteratingStates.TASKS + task = None + state.cur_always_task += 1 + + elif state.run_state == IteratingStates.HANDLERS: + if state.update_handlers: + # reset handlers for HostState since handlers from include_tasks + # might be there from previous flush + state.handlers = self.handlers[:] + state.update_handlers = False + state.cur_handlers_task = 0 + + if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS: + state.update_handlers = True + state.run_state = IteratingStates.COMPLETE + else: + while True: + try: + task = state.handlers[state.cur_handlers_task] + except IndexError: + task = None + state.run_state = state.pre_flushing_run_state + state.update_handlers = True + break + else: + state.cur_handlers_task += 1 + if task.is_host_notified(host): + break + + elif state.run_state == IteratingStates.COMPLETE: + return (state, None) + + # if something above set the task, break out of the loop now + if task: + break + + return (state, task) + + def _set_failed_state(self, state): + if state.run_state == IteratingStates.SETUP: + state.fail_state |= FailedStates.SETUP + state.run_state = IteratingStates.COMPLETE + elif state.run_state == IteratingStates.TASKS: + if state.tasks_child_state is not None: + state.tasks_child_state = 
self._set_failed_state(state.tasks_child_state) + else: + state.fail_state |= FailedStates.TASKS + if state._blocks[state.cur_block].rescue: + state.run_state = IteratingStates.RESCUE + elif state._blocks[state.cur_block].always: + state.run_state = IteratingStates.ALWAYS + else: + state.run_state = IteratingStates.COMPLETE + elif state.run_state == IteratingStates.RESCUE: + if state.rescue_child_state is not None: + state.rescue_child_state = self._set_failed_state(state.rescue_child_state) + else: + state.fail_state |= FailedStates.RESCUE + if state._blocks[state.cur_block].always: + state.run_state = IteratingStates.ALWAYS + else: + state.run_state = IteratingStates.COMPLETE + elif state.run_state == IteratingStates.ALWAYS: + if state.always_child_state is not None: + state.always_child_state = self._set_failed_state(state.always_child_state) + else: + state.fail_state |= FailedStates.ALWAYS + state.run_state = IteratingStates.COMPLETE + elif state.run_state == IteratingStates.HANDLERS: + state.fail_state |= FailedStates.HANDLERS + state.update_handlers = True + if state._blocks[state.cur_block].rescue: + state.run_state = IteratingStates.RESCUE + elif state._blocks[state.cur_block].always: + state.run_state = IteratingStates.ALWAYS + else: + state.run_state = IteratingStates.COMPLETE + return state + + def mark_host_failed(self, host): + s = self.get_host_state(host) + display.debug("marking host %s failed, current state: %s" % (host, s)) + s = self._set_failed_state(s) + display.debug("^ failed state is now: %s" % s) + self.set_state_for_host(host.name, s) + self._play._removed_hosts.append(host.name) + + def get_failed_hosts(self): + return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state)) + + def _check_failed_state(self, state): + if state is None: + return False + elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state): + return True + elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state): + return True + elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS: + return True + elif state.fail_state != FailedStates.NONE: + if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0: + return False + elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0: + return False + else: + return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0) + elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state): + cur_block = state._blocks[state.cur_block] + if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0: + return False + else: + return True + return False + + def is_failed(self, host): + s = self.get_host_state(host) + return self._check_failed_state(s) + + def clear_host_errors(self, host): + self._clear_state_errors(self.get_state_for_host(host.name)) + + def _clear_state_errors(self, state: HostState) -> None: + state.fail_state = FailedStates.NONE + + if state.tasks_child_state is not None: + self._clear_state_errors(state.tasks_child_state) + elif state.rescue_child_state is not None: + self._clear_state_errors(state.rescue_child_state) + elif state.always_child_state is not None: + self._clear_state_errors(state.always_child_state) + + def get_active_state(self, state): + ''' + Finds the active state, recursively if necessary 
when there are child states. + ''' + if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None: + return self.get_active_state(state.tasks_child_state) + elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None: + return self.get_active_state(state.rescue_child_state) + elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None: + return self.get_active_state(state.always_child_state) + return state + + def is_any_block_rescuing(self, state): + ''' + Given the current HostState state, determines if the current block, or any child blocks, + are in rescue mode. + ''' + if state.run_state == IteratingStates.TASKS and state.get_current_block().rescue: + return True + if state.tasks_child_state is not None: + return self.is_any_block_rescuing(state.tasks_child_state) + if state.rescue_child_state is not None: + return self.is_any_block_rescuing(state.rescue_child_state) + if state.always_child_state is not None: + return self.is_any_block_rescuing(state.always_child_state) + return False + + def get_original_task(self, host, task): + display.deprecated( + 'PlayIterator.get_original_task is now noop due to the changes ' + 'in the way tasks are cached and is deprecated.', + version='2.16' + ) + return (None, None) + + def _insert_tasks_into_state(self, state, task_list): + # if we've failed at all, or if the task list is empty, just return the current state + if (state.fail_state != FailedStates.NONE and state.run_state == IteratingStates.TASKS) or not task_list: + return state + + if state.run_state == IteratingStates.TASKS: + if state.tasks_child_state: + state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list) + else: + target_block = state._blocks[state.cur_block].copy() + before = target_block.block[:state.cur_regular_task] + after = target_block.block[state.cur_regular_task:] + target_block.block = before + task_list + after + state._blocks[state.cur_block] = target_block + elif state.run_state == IteratingStates.RESCUE: + if state.rescue_child_state: + state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list) + else: + target_block = state._blocks[state.cur_block].copy() + before = target_block.rescue[:state.cur_rescue_task] + after = target_block.rescue[state.cur_rescue_task:] + target_block.rescue = before + task_list + after + state._blocks[state.cur_block] = target_block + elif state.run_state == IteratingStates.ALWAYS: + if state.always_child_state: + state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list) + else: + target_block = state._blocks[state.cur_block].copy() + before = target_block.always[:state.cur_always_task] + after = target_block.always[state.cur_always_task:] + target_block.always = before + task_list + after + state._blocks[state.cur_block] = target_block + elif state.run_state == IteratingStates.HANDLERS: + state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block] + + return state + + def add_tasks(self, host, task_list): + self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list)) + + @property + def host_states(self): + return self._host_states + + def get_state_for_host(self, hostname: str) -> HostState: + return self._host_states[hostname] + + def set_state_for_host(self, hostname: str, state: HostState) -> None: + if not isinstance(state, HostState): + raise 
AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state)) + self._host_states[hostname] = state + + def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None: + if not isinstance(run_state, IteratingStates): + raise AnsibleAssertionError('Expected run_state to be an IteratingStates but was %s' % (type(run_state))) + self._host_states[hostname].run_state = run_state + + def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None: + if not isinstance(fail_state, FailedStates): + raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state))) + self._host_states[hostname].fail_state = fail_state diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py new file mode 100644 index 0000000..e8b2a3d --- /dev/null +++ b/lib/ansible/executor/playbook_executor.py @@ -0,0 +1,335 @@ +# (c) 2012-2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible import constants as C +from ansible import context +from ansible.executor.task_queue_manager import TaskQueueManager, AnsibleEndPlay +from ansible.module_utils._text import to_text +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.loader import become_loader, connection_loader, shell_loader +from ansible.playbook import Playbook +from ansible.template import Templar +from ansible.utils.helpers import pct_to_int +from ansible.utils.collection_loader import AnsibleCollectionConfig +from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path +from ansible.utils.path import makedirs_safe +from ansible.utils.ssh_functions import set_default_transport +from ansible.utils.display import Display + + +display = Display() + + +class PlaybookExecutor: + + ''' + This is the primary class for executing playbooks, and thus the + basis for bin/ansible-playbook operation. + ''' + + def __init__(self, playbooks, inventory, variable_manager, loader, passwords): + self._playbooks = playbooks + self._inventory = inventory + self._variable_manager = variable_manager + self._loader = loader + self.passwords = passwords + self._unreachable_hosts = dict() + + if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \ + context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'): + self._tqm = None + else: + self._tqm = TaskQueueManager( + inventory=inventory, + variable_manager=variable_manager, + loader=loader, + passwords=self.passwords, + forks=context.CLIARGS.get('forks'), + ) + + # Note: We run this here to cache whether the default ansible ssh + # executable supports control persist. 
Sometime in the future we may + # need to enhance this to check that ansible_ssh_executable specified + # in inventory is also cached. We can't do this caching at the point + # where it is used (in task_executor) because that is post-fork and + # therefore would be discarded after every task. + set_default_transport() + + def run(self): + ''' + Run the given playbook, based on the settings in the play which + may limit the runs to serialized groups, etc. + ''' + + result = 0 + entrylist = [] + entry = {} + try: + # preload become/connection/shell plugins so their config definitions are cached + list(connection_loader.all(class_only=True)) + list(shell_loader.all(class_only=True)) + list(become_loader.all(class_only=True)) + + for playbook in self._playbooks: + + # deal with FQCN + resource = _get_collection_playbook_path(playbook) + if resource is not None: + playbook_path = resource[1] + playbook_collection = resource[2] + else: + playbook_path = playbook + # not an FQCN, but it might still be a collection playbook + playbook_collection = _get_collection_name_from_path(playbook) + + if playbook_collection: + display.warning("running playbook inside collection {0}".format(playbook_collection)) + AnsibleCollectionConfig.default_collection = playbook_collection + else: + AnsibleCollectionConfig.default_collection = None + + pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) + # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path))) + + if self._tqm is None: # we are doing a listing + entry = {'playbook': playbook_path} + entry['plays'] = [] + else: + # make sure the tqm has callbacks loaded + self._tqm.load_callbacks() + self._tqm.send_callback('v2_playbook_on_start', pb) + + i = 1 + plays = pb.get_plays() + display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path))) + + for play in plays: + if play._included_path is not None: + self._loader.set_basedir(play._included_path) + else: + self._loader.set_basedir(pb._basedir) + + # clear any filters which may have been applied to the inventory + self._inventory.remove_restriction() + + # Allow variables to be used in vars_prompt fields. + all_vars = self._variable_manager.get_vars(play=play) + templar = Templar(loader=self._loader, variables=all_vars) + setattr(play, 'vars_prompt', templar.template(play.vars_prompt)) + + # FIXME: this should be a play 'sub object' like loop_control + if play.vars_prompt: + for var in play.vars_prompt: + vname = var['name'] + prompt = var.get("prompt", vname) + default = var.get("default", None) + private = boolean(var.get("private", True)) + confirm = boolean(var.get("confirm", False)) + encrypt = var.get("encrypt", None) + salt_size = var.get("salt_size", None) + salt = var.get("salt", None) + unsafe = var.get("unsafe", None) + + if vname not in self._variable_manager.extra_vars: + if self._tqm: + self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, + default, unsafe) + play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe) + else: # we are either in --list-