Diffstat (limited to 'bin')
-rwxr-xr-x  bin/ansible              207
-rwxr-xr-x  bin/ansible-config       551
-rwxr-xr-x  bin/ansible-connection   354
-rwxr-xr-x  bin/ansible-console      604
-rwxr-xr-x  bin/ansible-doc         1393
-rwxr-xr-x  bin/ansible-galaxy      1865
-rwxr-xr-x  bin/ansible-inventory    417
-rwxr-xr-x  bin/ansible-playbook     231
-rwxr-xr-x  bin/ansible-pull         364
-rwxr-xr-x  bin/ansible-test          45
-rwxr-xr-x  bin/ansible-vault        480
11 files changed, 6511 insertions, 0 deletions
diff --git a/bin/ansible b/bin/ansible
new file mode 100755
index 0000000..e90b44c
--- /dev/null
+++ b/bin/ansible
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_text
+from ansible.parsing.splitter import parse_kv
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.playbook import Playbook
+from ansible.playbook.play import Play
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class AdHocCLI(CLI):
+ ''' is an extra-simple tool/framework/API for doing 'remote things'.
+    This command allows you to define and run a single task 'playbook' against a set of hosts.
+ '''
+
+ name = 'ansible'
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+ super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
+ desc="Define and run a single task 'playbook' against a set of hosts",
+ epilog="Some actions do not make sense in Ad-Hoc (include, meta, etc)")
+
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_async_options(self.parser)
+ opt_help.add_output_options(self.parser)
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+ opt_help.add_tasknoplay_options(self.parser)
+
+ # options unique to ansible ad-hoc
+ self.parser.add_argument('-a', '--args', dest='module_args',
+ help="The action's options in space separated k=v format: -a 'opt1=val1 opt2=val2' "
+ "or a json string: -a '{\"opt1\": \"val1\", \"opt2\": \"val2\"}'",
+ default=C.DEFAULT_MODULE_ARGS)
+ self.parser.add_argument('-m', '--module-name', dest='module_name',
+ help="Name of the action to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
+ default=C.DEFAULT_MODULE_NAME)
+ self.parser.add_argument('args', metavar='pattern', help='host pattern')
+
+ def post_process_args(self, options):
+ '''Post process and validate options for bin/ansible '''
+
+ options = super(AdHocCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+
+ return options
+
+ def _play_ds(self, pattern, async_val, poll):
+ check_raw = context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS
+
+ module_args_raw = context.CLIARGS['module_args']
+ module_args = None
+ if module_args_raw and module_args_raw.startswith('{') and module_args_raw.endswith('}'):
+ try:
+ module_args = from_yaml(module_args_raw.strip(), json_only=True)
+ except AnsibleParserError:
+ pass
+
+ if not module_args:
+ module_args = parse_kv(module_args_raw, check_raw=check_raw)
+
+ mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': module_args},
+ 'timeout': context.CLIARGS['task_timeout']}
+
+        # avoid adding async/poll to actions that don't support them; if explicitly set anyway, the user gets an error
+ if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any(frozenset((async_val, poll))):
+ mytask['async_val'] = async_val
+ mytask['poll'] = poll
+
+ return dict(
+ name="Ansible Ad-Hoc",
+ hosts=pattern,
+ gather_facts='no',
+ tasks=[mytask])
+
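A sketch of the play datastructure _play_ds returns for a hypothetical `ansible web -m ping` invocation (task timeout left at its default of 0; the async/poll handling above is elided):

    play_ds = {
        'name': 'Ansible Ad-Hoc',
        'hosts': 'web',
        'gather_facts': 'no',
        'tasks': [{'action': {'module': 'ping', 'args': {}}, 'timeout': 0}],
    }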
+ def run(self):
+ ''' create and execute the single task playbook '''
+
+ super(AdHocCLI, self).run()
+
+ # only thing left should be host pattern
+ pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
+
+ # handle password prompts
+ sshpass = None
+ becomepass = None
+
+ (sshpass, becomepass) = self.ask_passwords()
+ passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ # get basic objects
+ loader, inventory, variable_manager = self._play_prereqs()
+
+ # get list of hosts to execute against
+ try:
+ hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
+ except AnsibleError:
+ if context.CLIARGS['subset']:
+ raise
+ else:
+ hosts = []
+ display.warning("No hosts matched, nothing to do")
+
+ # just listing hosts?
+ if context.CLIARGS['listhosts']:
+ display.display(' hosts (%d):' % len(hosts))
+ for host in hosts:
+ display.display(' %s' % host)
+ return 0
+
+        # verify we have arguments if we know we need them
+ if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
+ err = "No argument passed to %s module" % context.CLIARGS['module_name']
+ if pattern.endswith(".yml"):
+ err = err + ' (did you mean to run ansible-playbook?)'
+ raise AnsibleOptionsError(err)
+
+ # Avoid modules that don't work with ad-hoc
+ if context.CLIARGS['module_name'] in C._ACTION_IMPORT_PLAYBOOK:
+ raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
+ % context.CLIARGS['module_name'])
+
+ # construct playbook objects to wrap task
+ play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
+ play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
+
+ # used in start callback
+ playbook = Playbook(loader)
+ playbook._entries.append(play)
+ playbook._file_name = '__adhoc_playbook__'
+
+ if self.callback:
+ cb = self.callback
+ elif context.CLIARGS['one_line']:
+ cb = 'oneline'
+        # Respect a custom 'stdout_callback' only when 'bin_ansible_callbacks' is enabled
+ elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
+ cb = C.DEFAULT_STDOUT_CALLBACK
+ else:
+ cb = 'minimal'
+
+ run_tree = False
+ if context.CLIARGS['tree']:
+ C.CALLBACKS_ENABLED.append('tree')
+ C.TREE_DIR = context.CLIARGS['tree']
+ run_tree = True
+
+ # now create a task queue manager to execute the play
+ self._tqm = None
+ try:
+ self._tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ passwords=passwords,
+ stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=run_tree,
+ forks=context.CLIARGS['forks'],
+ )
+
+ self._tqm.load_callbacks()
+ self._tqm.send_callback('v2_playbook_on_start', playbook)
+
+ result = self._tqm.run(play)
+
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+ finally:
+ if self._tqm:
+ self._tqm.cleanup()
+ if loader:
+ loader.cleanup_all_tmp_files()
+
+ return result
+
+
+def main(args=None):
+ AdHocCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
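Usage sketch for this entry point (hypothetical, assuming cli_executor accepts a full argv-style list, as main() suggests); equivalent to running `ansible localhost -c local -m ping` from a shell:

    main(['ansible', 'localhost', '-c', 'local', '-m', 'ping'])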
diff --git a/bin/ansible-config b/bin/ansible-config
new file mode 100755
index 0000000..3a5c242
--- /dev/null
+++ b/bin/ansible-config
@@ -0,0 +1,551 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import os
+import yaml
+import shlex
+import subprocess
+
+from collections.abc import Mapping
+
+from ansible import context
+import ansible.plugins.loader as plugin_loader
+
+from ansible import constants as C
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.config.manager import ConfigManager, Setting
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text, to_bytes
+from ansible.module_utils.common.json import json_dump
+from ansible.module_utils.six import string_types
+from ansible.parsing.quoting import is_quoted
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+def yaml_dump(data, default_flow_style=False, default_style=None):
+ return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style, default_style=default_style)
+
+
+def yaml_short(data):
+ return yaml_dump(data, default_flow_style=True, default_style="''")
+
+
+def get_constants():
+ ''' helper method to ensure we can template based on existing constants '''
+ if not hasattr(get_constants, 'cvars'):
+ get_constants.cvars = {k: getattr(C, k) for k in dir(C) if not k.startswith('__')}
+ return get_constants.cvars
+
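get_constants memoizes by stashing the computed dict on the function object itself; the same pattern in isolation:

    def expensive_lookup():
        # compute once, cache the result as an attribute on the function object
        if not hasattr(expensive_lookup, 'cached'):
            expensive_lookup.cached = sum(range(1000))  # stand-in for real work
        return expensive_lookup.cached

    assert expensive_lookup() == expensive_lookup() == 499500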
+
+class ConfigCLI(CLI):
+ """ Config command line class """
+
+ name = 'ansible-config'
+
+ def __init__(self, args, callback=None):
+
+ self.config_file = None
+ self.config = None
+ super(ConfigCLI, self).__init__(args, callback)
+
+ def init_parser(self):
+
+ super(ConfigCLI, self).init_parser(
+ desc="View ansible configuration.",
+ )
+
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ opt_help.add_verbosity_options(common)
+ common.add_argument('-c', '--config', dest='config_file',
+ help="path to configuration file, defaults to first file found in precedence.")
+ common.add_argument("-t", "--type", action="store", default='base', dest='type', choices=['all', 'base'] + list(C.CONFIGURABLE_PLUGINS),
+ help="Filter down to a specific plugin type.")
+ common.add_argument('args', help='Specific plugin to target, requires type of plugin to be set', nargs='*')
+
+ subparsers = self.parser.add_subparsers(dest='action')
+ subparsers.required = True
+
+ list_parser = subparsers.add_parser('list', help='Print all config options', parents=[common])
+ list_parser.set_defaults(func=self.execute_list)
+ list_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml'], default='yaml',
+ help='Output format for list')
+
+ dump_parser = subparsers.add_parser('dump', help='Dump configuration', parents=[common])
+ dump_parser.set_defaults(func=self.execute_dump)
+ dump_parser.add_argument('--only-changed', '--changed-only', dest='only_changed', action='store_true',
+ help="Only show configurations that have changed from the default")
+ dump_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml', 'display'], default='display',
+ help='Output format for dump')
+
+ view_parser = subparsers.add_parser('view', help='View configuration file', parents=[common])
+ view_parser.set_defaults(func=self.execute_view)
+
+ init_parser = subparsers.add_parser('init', help='Create initial configuration', parents=[common])
+ init_parser.set_defaults(func=self.execute_init)
+ init_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env', 'vars'], default='ini',
+ help='Output format for init')
+ init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False,
+ help='Prefixes all entries with a comment character to disable them')
+
+ # search_parser = subparsers.add_parser('find', help='Search configuration')
+ # search_parser.set_defaults(func=self.execute_search)
+ # search_parser.add_argument('args', help='Search term', metavar='<search term>')
+
+ def post_process_args(self, options):
+ options = super(ConfigCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+
+ return options
+
+ def run(self):
+
+ super(ConfigCLI, self).run()
+
+ if context.CLIARGS['config_file']:
+ self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
+ b_config = to_bytes(self.config_file)
+ if os.path.exists(b_config) and os.access(b_config, os.R_OK):
+ self.config = ConfigManager(self.config_file)
+ else:
+ raise AnsibleOptionsError('The provided configuration file is missing or not accessible: %s' % to_native(self.config_file))
+ else:
+ self.config = C.config
+ self.config_file = self.config._config_file
+
+ if self.config_file:
+ try:
+ if not os.path.exists(self.config_file):
+ raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
+ elif not os.path.isfile(self.config_file):
+ raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
+
+ os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
+ except Exception:
+ if context.CLIARGS['action'] in ['view']:
+ raise
+ elif context.CLIARGS['action'] in ['edit', 'update']:
+ display.warning("File does not exist, used empty file: %s" % self.config_file)
+
+ elif context.CLIARGS['action'] == 'view':
+ raise AnsibleError('Invalid or no config file was supplied')
+
+ # run the requested action
+ context.CLIARGS['func']()
+
+ def execute_update(self):
+ '''
+ Updates a single setting in the specified ansible.cfg
+ '''
+ raise AnsibleError("Option not implemented yet")
+
+ # pylint: disable=unreachable
+ if context.CLIARGS['setting'] is None:
+ raise AnsibleOptionsError("update option requires a setting to update")
+
+ (entry, value) = context.CLIARGS['setting'].split('=')
+ if '.' in entry:
+ (section, option) = entry.split('.')
+ else:
+ section = 'defaults'
+ option = entry
+ subprocess.call([
+ 'ansible',
+ '-m', 'ini_file',
+ 'localhost',
+ '-c', 'local',
+ '-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
+ ])
+
+ def execute_view(self):
+ '''
+ Displays the current config file
+ '''
+ try:
+ with open(self.config_file, 'rb') as f:
+ self.pager(to_text(f.read(), errors='surrogate_or_strict'))
+ except Exception as e:
+ raise AnsibleError("Failed to open config file: %s" % to_native(e))
+
+ def execute_edit(self):
+ '''
+ Opens ansible.cfg in the default EDITOR
+ '''
+ raise AnsibleError("Option not implemented yet")
+
+ # pylint: disable=unreachable
+ try:
+ editor = shlex.split(os.environ.get('EDITOR', 'vi'))
+ editor.append(self.config_file)
+ subprocess.call(editor)
+ except Exception as e:
+ raise AnsibleError("Failed to open editor: %s" % to_native(e))
+
+ def _list_plugin_settings(self, ptype, plugins=None):
+ entries = {}
+ loader = getattr(plugin_loader, '%s_loader' % ptype)
+
+ # build list
+ if plugins:
+ plugin_cs = []
+ for plugin in plugins:
+ p = loader.get(plugin, class_only=True)
+ if p is None:
+ display.warning("Skipping %s as we could not find matching plugin" % plugin)
+ else:
+ plugin_cs.append(p)
+ else:
+ plugin_cs = loader.all(class_only=True)
+
+ # iterate over class instances
+ for plugin in plugin_cs:
+ finalname = name = plugin._load_name
+ if name.startswith('_'):
+ # alias or deprecated
+ if os.path.islink(plugin._original_path):
+ continue
+ else:
+ finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
+
+ entries[finalname] = self.config.get_configuration_definitions(ptype, name)
+
+ return entries
+
+ def _list_entries_from_args(self):
+ '''
+        build a dict with the list of requested configs
+ '''
+ config_entries = {}
+ if context.CLIARGS['type'] in ('base', 'all'):
+ # this dumps main/common configs
+ config_entries = self.config.get_configuration_definitions(ignore_private=True)
+
+ if context.CLIARGS['type'] != 'base':
+ config_entries['PLUGINS'] = {}
+
+ if context.CLIARGS['type'] == 'all':
+ # now each plugin type
+ for ptype in C.CONFIGURABLE_PLUGINS:
+ config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype)
+ elif context.CLIARGS['type'] != 'base':
+ config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args'])
+
+ return config_entries
+
+ def execute_list(self):
+ '''
+ list and output available configs
+ '''
+
+ config_entries = self._list_entries_from_args()
+ if context.CLIARGS['format'] == 'yaml':
+ output = yaml_dump(config_entries)
+ elif context.CLIARGS['format'] == 'json':
+ output = json_dump(config_entries)
+
+ self.pager(to_text(output, errors='surrogate_or_strict'))
+
+ def _get_settings_vars(self, settings, subkey):
+
+ data = []
+ if context.CLIARGS['commented']:
+ prefix = '#'
+ else:
+ prefix = ''
+
+ for setting in settings:
+
+ if not settings[setting].get('description'):
+ continue
+
+ default = settings[setting].get('default', '')
+ if subkey == 'env':
+ stype = settings[setting].get('type', '')
+ if stype == 'boolean':
+ if default:
+ default = '1'
+ else:
+ default = '0'
+ elif default:
+ if stype == 'list':
+ if not isinstance(default, string_types):
+ # python lists are not valid env ones
+ try:
+ default = ', '.join(default)
+ except Exception as e:
+ # list of other stuff
+ default = '%s' % to_native(default)
+ if isinstance(default, string_types) and not is_quoted(default):
+ default = shlex.quote(default)
+ elif default is None:
+ default = ''
+
+ if subkey in settings[setting] and settings[setting][subkey]:
+ entry = settings[setting][subkey][-1]['name']
+ if isinstance(settings[setting]['description'], string_types):
+ desc = settings[setting]['description']
+ else:
+ desc = '\n#'.join(settings[setting]['description'])
+ name = settings[setting].get('name', setting)
+ data.append('# %s(%s): %s' % (name, settings[setting].get('type', 'string'), desc))
+
+ # TODO: might need quoting and value coercion depending on type
+ if subkey == 'env':
+ if entry.startswith('_ANSIBLE_'):
+ continue
+ data.append('%s%s=%s' % (prefix, entry, default))
+ elif subkey == 'vars':
+ if entry.startswith('_ansible_'):
+ continue
+ data.append(prefix + '%s: %s' % (entry, to_text(yaml_short(default), errors='surrogate_or_strict')))
+ data.append('')
+
+ return data
+
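A stand-in mirroring the env rendering above for one hypothetical string setting (all names illustrative):

    import shlex

    name, stype, desc = 'MY_KNOB', 'string', 'Example knob'
    entry, default = 'ANSIBLE_MY_KNOB', shlex.quote('abc')  # string defaults get shell-quoted
    lines = ['# %s(%s): %s' % (name, stype, desc), '%s=%s' % (entry, default)]
    assert lines == ['# MY_KNOB(string): Example knob', 'ANSIBLE_MY_KNOB=abc']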
+ def _get_settings_ini(self, settings):
+
+ sections = {}
+ for o in sorted(settings.keys()):
+
+ opt = settings[o]
+
+ if not isinstance(opt, Mapping):
+                # recursed into one of the few settings that is a mapping, now hitting its strings
+ continue
+
+ if not opt.get('description'):
+                # it's a plugin
+ new_sections = self._get_settings_ini(opt)
+ for s in new_sections:
+ if s in sections:
+ sections[s].extend(new_sections[s])
+ else:
+ sections[s] = new_sections[s]
+ continue
+
+ if isinstance(opt['description'], string_types):
+ desc = '# (%s) %s' % (opt.get('type', 'string'), opt['description'])
+ else:
+ desc = "# (%s) " % opt.get('type', 'string')
+ desc += "\n# ".join(opt['description'])
+
+ if 'ini' in opt and opt['ini']:
+ entry = opt['ini'][-1]
+ if entry['section'] not in sections:
+ sections[entry['section']] = []
+
+ default = opt.get('default', '')
+ if opt.get('type', '') == 'list' and not isinstance(default, string_types):
+ # python lists are not valid ini ones
+ default = ', '.join(default)
+ elif default is None:
+ default = ''
+
+ if context.CLIARGS['commented']:
+ entry['key'] = ';%s' % entry['key']
+
+ key = desc + '\n%s=%s' % (entry['key'], default)
+ sections[entry['section']].append(key)
+
+ return sections
+
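Likewise for the ini renderer: a hypothetical integer option under [defaults] comes out as a commented description plus a key=value line:

    opt = {'type': 'integer', 'description': 'Max parallel processes',
           'default': 5, 'ini': [{'section': 'defaults', 'key': 'forks'}]}
    entry = opt['ini'][-1]
    desc = '# (%s) %s' % (opt.get('type', 'string'), opt['description'])
    key = desc + '\n%s=%s' % (entry['key'], opt['default'])
    assert key == '# (integer) Max parallel processes\nforks=5'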
+ def execute_init(self):
+
+ data = []
+ config_entries = self._list_entries_from_args()
+ plugin_types = config_entries.pop('PLUGINS', None)
+
+ if context.CLIARGS['format'] == 'ini':
+ sections = self._get_settings_ini(config_entries)
+
+ if plugin_types:
+ for ptype in plugin_types:
+ plugin_sections = self._get_settings_ini(plugin_types[ptype])
+ for s in plugin_sections:
+ if s in sections:
+ sections[s].extend(plugin_sections[s])
+ else:
+ sections[s] = plugin_sections[s]
+
+ if sections:
+ for section in sections.keys():
+ data.append('[%s]' % section)
+ for key in sections[section]:
+ data.append(key)
+ data.append('')
+ data.append('')
+
+ elif context.CLIARGS['format'] in ('env', 'vars'): # TODO: add yaml once that config option is added
+ data = self._get_settings_vars(config_entries, context.CLIARGS['format'])
+ if plugin_types:
+ for ptype in plugin_types:
+ for plugin in plugin_types[ptype].keys():
+ data.extend(self._get_settings_vars(plugin_types[ptype][plugin], context.CLIARGS['format']))
+
+ self.pager(to_text('\n'.join(data), errors='surrogate_or_strict'))
+
+ def _render_settings(self, config):
+
+ entries = []
+ for setting in sorted(config):
+ changed = (config[setting].origin not in ('default', 'REQUIRED'))
+
+ if context.CLIARGS['format'] == 'display':
+ if isinstance(config[setting], Setting):
+ # proceed normally
+ if config[setting].origin == 'default':
+ color = 'green'
+ elif config[setting].origin == 'REQUIRED':
+ # should include '_terms', '_input', etc
+ color = 'red'
+ else:
+ color = 'yellow'
+ msg = "%s(%s) = %s" % (setting, config[setting].origin, config[setting].value)
+ else:
+ color = 'green'
+ msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default'))
+
+ entry = stringc(msg, color)
+ else:
+ entry = {}
+ for key in config[setting]._fields:
+ entry[key] = getattr(config[setting], key)
+
+ if not context.CLIARGS['only_changed'] or changed:
+ entries.append(entry)
+
+ return entries
+
+ def _get_global_configs(self):
+ config = self.config.get_configuration_definitions(ignore_private=True).copy()
+ for setting in config.keys():
+ v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
+ config[setting] = Setting(setting, v, o, None)
+
+ return self._render_settings(config)
+
+ def _get_plugin_configs(self, ptype, plugins):
+
+ # prep loading
+ loader = getattr(plugin_loader, '%s_loader' % ptype)
+
+        # accumulators
+ output = []
+ config_entries = {}
+
+ # build list
+ if plugins:
+ plugin_cs = []
+ for plugin in plugins:
+ p = loader.get(plugin, class_only=True)
+ if p is None:
+ display.warning("Skipping %s as we could not find matching plugin" % plugin)
+ else:
+ plugin_cs.append(loader.get(plugin, class_only=True))
+ else:
+ plugin_cs = loader.all(class_only=True)
+
+ for plugin in plugin_cs:
+            # in case of deprecation they diverge
+ finalname = name = plugin._load_name
+ if name.startswith('_'):
+ if os.path.islink(plugin._original_path):
+ # skip alias
+ continue
+ # deprecated, but use 'nice name'
+ finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
+
+ # default entries per plugin
+ config_entries[finalname] = self.config.get_configuration_definitions(ptype, name)
+
+ try:
+ # populate config entries by loading plugin
+ dump = loader.get(name, class_only=True)
+ except Exception as e:
+                display.warning('Skipping "%s" %s plugin, as we cannot load plugin to check config due to: %s' % (name, ptype, to_native(e)))
+ continue
+
+ # actually get the values
+ for setting in config_entries[finalname].keys():
+ try:
+ v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants())
+ except AnsibleError as e:
+ if to_text(e).startswith('No setting was provided for required configuration'):
+ v = None
+ o = 'REQUIRED'
+ else:
+ raise e
+
+ if v is None and o is None:
+ # not all cases will be error
+ o = 'REQUIRED'
+
+ config_entries[finalname][setting] = Setting(setting, v, o, None)
+
+ # pretty please!
+ results = self._render_settings(config_entries[finalname])
+ if results:
+ if context.CLIARGS['format'] == 'display':
+ # avoid header for empty lists (only changed!)
+ output.append('\n%s:\n%s' % (finalname, '_' * len(finalname)))
+ output.extend(results)
+ else:
+ output.append({finalname: results})
+
+ return output
+
+ def execute_dump(self):
+ '''
+ Shows the current settings, merges ansible.cfg if specified
+ '''
+ if context.CLIARGS['type'] == 'base':
+ # deal with base
+ output = self._get_global_configs()
+ elif context.CLIARGS['type'] == 'all':
+ # deal with base
+ output = self._get_global_configs()
+ # deal with plugins
+ for ptype in C.CONFIGURABLE_PLUGINS:
+ plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args'])
+ if context.CLIARGS['format'] == 'display':
+ if not context.CLIARGS['only_changed'] or plugin_list:
+ output.append('\n%s:\n%s' % (ptype.upper(), '=' * len(ptype)))
+ output.extend(plugin_list)
+ else:
+ if ptype in ('modules', 'doc_fragments'):
+ pname = ptype.upper()
+ else:
+ pname = '%s_PLUGINS' % ptype.upper()
+ output.append({pname: plugin_list})
+ else:
+ # deal with plugins
+ output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args'])
+
+ if context.CLIARGS['format'] == 'display':
+ text = '\n'.join(output)
+        elif context.CLIARGS['format'] == 'yaml':
+ text = yaml_dump(output)
+ elif context.CLIARGS['format'] == 'json':
+ text = json_dump(output)
+
+ self.pager(to_text(text, errors='surrogate_or_strict'))
+
+
+def main(args=None):
+ ConfigCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-connection b/bin/ansible-connection
new file mode 100755
index 0000000..9109137
--- /dev/null
+++ b/bin/ansible-connection
@@ -0,0 +1,354 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+import argparse
+import fcntl
+import hashlib
+import io
+import os
+import pickle
+import signal
+import socket
+import sys
+import time
+import traceback
+import errno
+import json
+
+from contextlib import contextmanager
+
+from ansible import constants as C
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.connection import Connection, ConnectionError, send_data, recv_data
+from ansible.module_utils.service import fork_process
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import connection_loader
+from ansible.utils.path import unfrackpath, makedirs_safe
+from ansible.utils.display import Display
+from ansible.utils.jsonrpc import JsonRpcServer
+
+display = Display()
+
+
+def read_stream(byte_stream):
+ size = int(byte_stream.readline().strip())
+
+ data = byte_stream.read(size)
+ if len(data) < size:
+ raise Exception("EOF found before data was complete")
+
+ data_hash = to_text(byte_stream.readline().strip())
+ if data_hash != hashlib.sha1(data).hexdigest():
+ raise Exception("Read {0} bytes, but data did not match checksum".format(size))
+
+ # restore escaped loose \r characters
+ data = data.replace(br'\r', b'\r')
+
+ return data
+
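read_stream consumes a simple frame: a decimal size line, the payload (with loose \r bytes escaped), then a sha1 hex line. A sketch of the matching writer side (hypothetical helper, not part of this patch):

    import hashlib

    def write_stream(byte_stream, data):
        # mirror of read_stream: escape bare \r, then emit size, payload, sha1 digest
        data = data.replace(b'\r', br'\r')
        byte_stream.write(b'%d\n' % len(data))
        byte_stream.write(data)
        byte_stream.write(hashlib.sha1(data).hexdigest().encode() + b'\n')
        byte_stream.flush()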
+
+@contextmanager
+def file_lock(lock_path):
+ """
+ Uses contextmanager to create and release a file lock based on the
+ given path. This allows us to create locks using `with file_lock()`
+ to prevent deadlocks related to failure to unlock properly.
+ """
+
+    lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, 0o600)
+    fcntl.lockf(lock_fd, fcntl.LOCK_EX)
+    try:
+        yield
+    finally:
+        # release the lock even if the guarded block raises
+        fcntl.lockf(lock_fd, fcntl.LOCK_UN)
+        os.close(lock_fd)
+
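Usage sketch for the context manager above (path illustrative):

    with file_lock('/tmp/.ansible_pc_lock_demo'):
        pass  # critical section, e.g. checking for/creating the domain socket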
+
+class ConnectionProcess(object):
+ '''
+ The connection process wraps around a Connection object that manages
+ the connection to a remote device that persists over the playbook
+ '''
+ def __init__(self, fd, play_context, socket_path, original_path, task_uuid=None, ansible_playbook_pid=None):
+ self.play_context = play_context
+ self.socket_path = socket_path
+ self.original_path = original_path
+ self._task_uuid = task_uuid
+
+ self.fd = fd
+ self.exception = None
+
+ self.srv = JsonRpcServer()
+ self.sock = None
+
+ self.connection = None
+ self._ansible_playbook_pid = ansible_playbook_pid
+
+ def start(self, options):
+ messages = list()
+ result = {}
+
+ try:
+ messages.append(('vvvv', 'control socket path is %s' % self.socket_path))
+
+ # If this is a relative path (~ gets expanded later) then plug the
+ # key's path on to the directory we originally came from, so we can
+ # find it now that our cwd is /
+ if self.play_context.private_key_file and self.play_context.private_key_file[0] not in '~/':
+ self.play_context.private_key_file = os.path.join(self.original_path, self.play_context.private_key_file)
+ self.connection = connection_loader.get(self.play_context.connection, self.play_context, '/dev/null',
+ task_uuid=self._task_uuid, ansible_playbook_pid=self._ansible_playbook_pid)
+ try:
+ self.connection.set_options(direct=options)
+ except ConnectionError as exc:
+ messages.append(('debug', to_text(exc)))
+ raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
+
+ self.connection._socket_path = self.socket_path
+ self.srv.register(self.connection)
+ messages.extend([('vvvv', msg) for msg in sys.stdout.getvalue().splitlines()])
+
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.bind(self.socket_path)
+ self.sock.listen(1)
+ messages.append(('vvvv', 'local domain socket listeners started successfully'))
+ except Exception as exc:
+ messages.extend(self.connection.pop_messages())
+ result['error'] = to_text(exc)
+ result['exception'] = traceback.format_exc()
+ finally:
+ result['messages'] = messages
+ self.fd.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ self.fd.close()
+
+ def run(self):
+ try:
+ log_messages = self.connection.get_option('persistent_log_messages')
+ while not self.connection._conn_closed:
+ signal.signal(signal.SIGALRM, self.connect_timeout)
+ signal.signal(signal.SIGTERM, self.handler)
+ signal.alarm(self.connection.get_option('persistent_connect_timeout'))
+
+ self.exception = None
+ (s, addr) = self.sock.accept()
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.command_timeout)
+ while True:
+ data = recv_data(s)
+ if not data:
+ break
+
+ if log_messages:
+ display.display("jsonrpc request: %s" % data, log_only=True)
+
+ request = json.loads(to_text(data, errors='surrogate_or_strict'))
+ if request.get('method') == "exec_command" and not self.connection.connected:
+ self.connection._connect()
+
+ signal.alarm(self.connection.get_option('persistent_command_timeout'))
+
+ resp = self.srv.handle_request(data)
+ signal.alarm(0)
+
+ if log_messages:
+ display.display("jsonrpc response: %s" % resp, log_only=True)
+
+ send_data(s, to_bytes(resp))
+
+ s.close()
+
+ except Exception as e:
+ # socket.accept() will raise EINTR if the socket.close() is called
+ if hasattr(e, 'errno'):
+ if e.errno != errno.EINTR:
+ self.exception = traceback.format_exc()
+ else:
+ self.exception = traceback.format_exc()
+
+ finally:
+            # allow time for any exception msg sent over the socket to be received at the other end before shutting down
+ time.sleep(0.1)
+
+ # when done, close the connection properly and cleanup the socket file so it can be recreated
+ self.shutdown()
+
+ def connect_timeout(self, signum, frame):
+ msg = 'persistent connection idle timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and ' \
+ 'Troubleshooting Guide.' % self.connection.get_option('persistent_connect_timeout')
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def command_timeout(self, signum, frame):
+ msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\
+ % self.connection.get_option('persistent_command_timeout')
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def handler(self, signum, frame):
+ msg = 'signal handler called with signal %s.' % signum
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def shutdown(self):
+ """ Shuts down the local domain socket
+ """
+ lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(self.socket_path))
+ if os.path.exists(self.socket_path):
+ try:
+ if self.sock:
+ self.sock.close()
+ if self.connection:
+ self.connection.close()
+ if self.connection.get_option("persistent_log_messages"):
+ for _level, message in self.connection.pop_messages():
+ display.display(message, log_only=True)
+ except Exception:
+ pass
+ finally:
+ if os.path.exists(self.socket_path):
+ os.remove(self.socket_path)
+ setattr(self.connection, '_socket_path', None)
+ setattr(self.connection, '_connected', False)
+
+ if os.path.exists(lock_path):
+ os.remove(lock_path)
+
+ display.display('shutdown complete', log_only=True)
+
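Worked example of the lock-path derivation used in shutdown() (and again in main() below); the real code additionally normalizes the result with unfrackpath:

    import os

    socket_path = '/home/user/.ansible/pc/abcd1234'  # hypothetical control socket
    lock_path = "%s/.ansible_pc_lock_%s" % os.path.split(socket_path)
    assert lock_path == '/home/user/.ansible/pc/.ansible_pc_lock_abcd1234'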
+
+def main(args=None):
+ """ Called to initiate the connect to the remote device
+ """
+
+ parser = opt_help.create_base_parser(prog='ansible-connection')
+ opt_help.add_verbosity_options(parser)
+ parser.add_argument('playbook_pid')
+ parser.add_argument('task_uuid')
+ args = parser.parse_args(args[1:] if args is not None else args)
+
+ # initialize verbosity
+ display.verbosity = args.verbosity
+
+ rc = 0
+ result = {}
+ messages = list()
+ socket_path = None
+
+ # Need stdin as a byte stream
+ stdin = sys.stdin.buffer
+
+ # Note: update the below log capture code after Display.display() is refactored.
+ saved_stdout = sys.stdout
+ sys.stdout = io.StringIO()
+
+ try:
+ # read the play context data via stdin, which means depickling it
+ opts_data = read_stream(stdin)
+ init_data = read_stream(stdin)
+
+ pc_data = pickle.loads(init_data, encoding='bytes')
+ options = pickle.loads(opts_data, encoding='bytes')
+
+ play_context = PlayContext()
+ play_context.deserialize(pc_data)
+
+ except Exception as e:
+ rc = 1
+ result.update({
+ 'error': to_text(e),
+ 'exception': traceback.format_exc()
+ })
+
+ if rc == 0:
+ ssh = connection_loader.get('ssh', class_only=True)
+ ansible_playbook_pid = args.playbook_pid
+ task_uuid = args.task_uuid
+ cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
+ # create the persistent connection dir if need be and create the paths
+ # which we will be using later
+ tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
+ makedirs_safe(tmp_path)
+
+ socket_path = unfrackpath(cp % dict(directory=tmp_path))
+ lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
+
+ with file_lock(lock_path):
+ if not os.path.exists(socket_path):
+ messages.append(('vvvv', 'local domain socket does not exist, starting it'))
+ original_path = os.getcwd()
+ r, w = os.pipe()
+ pid = fork_process()
+
+ if pid == 0:
+ try:
+ os.close(r)
+ wfd = os.fdopen(w, 'w')
+ process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
+ process.start(options)
+ except Exception:
+ messages.append(('error', traceback.format_exc()))
+ rc = 1
+
+ if rc == 0:
+ process.run()
+ else:
+ process.shutdown()
+
+ sys.exit(rc)
+
+ else:
+ os.close(w)
+ rfd = os.fdopen(r, 'r')
+ data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
+ messages.extend(data.pop('messages'))
+ result.update(data)
+
+ else:
+ messages.append(('vvvv', 'found existing local domain socket, using it!'))
+ conn = Connection(socket_path)
+ try:
+ conn.set_options(direct=options)
+ except ConnectionError as exc:
+ messages.append(('debug', to_text(exc)))
+ raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
+ pc_data = to_text(init_data)
+ try:
+ conn.update_play_context(pc_data)
+ conn.set_check_prompt(task_uuid)
+ except Exception as exc:
+            # Only network_cli has update_play_context and set_check_prompt, so missing them is
+ # not fatal e.g. netconf
+ if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
+ pass
+ else:
+ result.update({
+ 'error': to_text(exc),
+ 'exception': traceback.format_exc()
+ })
+
+ if os.path.exists(socket_path):
+ messages.extend(Connection(socket_path).pop_messages())
+ messages.append(('vvvv', sys.stdout.getvalue()))
+ result.update({
+ 'messages': messages,
+ 'socket_path': socket_path
+ })
+
+ sys.stdout = saved_stdout
+ if 'exception' in result:
+ rc = 1
+ sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ else:
+ rc = 0
+ sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
+
+ sys.exit(rc)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-console b/bin/ansible-console
new file mode 100755
index 0000000..3125cc4
--- /dev/null
+++ b/bin/ansible-console
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+# Copyright: (c) 2014, Nandor Sivok <dominis@haxor.hu>
+# Copyright: (c) 2016, Redhat Inc
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import atexit
+import cmd
+import getpass
+import readline
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.parsing.splitter import parse_kv
+from ansible.playbook.play import Play
+from ansible.plugins.list import list_plugins
+from ansible.plugins.loader import module_loader, fragment_loader
+from ansible.utils import plugin_docs
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ConsoleCLI(CLI, cmd.Cmd):
+ '''
+ A REPL that allows for running ad-hoc tasks against a chosen inventory
+ from a nice shell with built-in tab completion (based on dominis'
+ ansible-shell).
+
+ It supports several commands, and you can modify its configuration at
+ runtime:
+
+ - `cd [pattern]`: change host/group (you can use host patterns eg.: app*.dc*:!app01*)
+ - `list`: list available hosts in the current path
+ - `list groups`: list groups included in the current path
+ - `become`: toggle the become flag
+ - `!`: forces shell module instead of the ansible module (!yum update -y)
+ - `verbosity [num]`: set the verbosity level
+ - `forks [num]`: set the number of forks
+ - `become_user [user]`: set the become_user
+ - `remote_user [user]`: set the remote_user
+ - `become_method [method]`: set the privilege escalation method
+ - `check [bool]`: toggle check mode
+ - `diff [bool]`: toggle diff mode
+ - `timeout [integer]`: set the timeout of tasks in seconds (0 to disable)
+ - `help [command/module]`: display documentation for the command or module
+ - `exit`: exit ansible-console
+ '''
+
+ name = 'ansible-console'
+ modules = [] # type: list[str] | None
+ ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
+ 'selecting hosts in inventory or any combination of the two separated by commas.'}
+
+ # use specific to console, but fallback to highlight for backwards compatibility
+ NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT
+
+ def __init__(self, args):
+
+ super(ConsoleCLI, self).__init__(args)
+
+ self.intro = 'Welcome to the ansible console. Type help or ? to list commands.\n'
+
+ self.groups = []
+ self.hosts = []
+ self.pattern = None
+ self.variable_manager = None
+ self.loader = None
+ self.passwords = dict()
+
+ self.cwd = '*'
+
+ # Defaults for these are set from the CLI in run()
+ self.remote_user = None
+ self.become = None
+ self.become_user = None
+ self.become_method = None
+ self.check_mode = None
+ self.diff = None
+ self.forks = None
+ self.task_timeout = None
+ self.collections = None
+
+ cmd.Cmd.__init__(self)
+
+ def init_parser(self):
+ super(ConsoleCLI, self).init_parser(
+ desc="REPL console for executing Ansible tasks.",
+ epilog="This is not a live session/connection: each task is executed in the background and returns its results."
+ )
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_tasknoplay_options(self.parser)
+
+ # options unique to shell
+ self.parser.add_argument('pattern', help='host pattern', metavar='pattern', default='all', nargs='?')
+ self.parser.add_argument('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+
+ def post_process_args(self, options):
+ options = super(ConsoleCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+ return options
+
+ def get_names(self):
+ return dir(self)
+
+ def cmdloop(self):
+ try:
+ cmd.Cmd.cmdloop(self)
+
+ except KeyboardInterrupt:
+ self.cmdloop()
+
+ except EOFError:
+ self.display("[Ansible-console was exited]")
+ self.do_exit(self)
+
+ def set_prompt(self):
+ login_user = self.remote_user or getpass.getuser()
+ self.selected = self.inventory.list_hosts(self.cwd)
+ prompt = "%s@%s (%d)[f:%s]" % (login_user, self.cwd, len(self.selected), self.forks)
+ if self.become and self.become_user in [None, 'root']:
+ prompt += "# "
+ color = C.COLOR_ERROR
+ else:
+ prompt += "$ "
+ color = self.NORMAL_PROMPT
+ self.prompt = stringc(prompt, color, wrap_nonvisible_chars=True)
+
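Worked example of the prompt string built above (values hypothetical, color wrapping omitted):

    login_user, cwd, num_selected, forks = 'admin', 'web*', 12, 5
    prompt = "%s@%s (%d)[f:%s]" % (login_user, cwd, num_selected, forks) + "$ "
    assert prompt == 'admin@web* (12)[f:5]$ '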
+ def list_modules(self):
+ return list_plugins('module', self.collections)
+
+ def default(self, line, forceshell=False):
+ """ actually runs modules """
+ if line.startswith("#"):
+ return False
+
+ if not self.cwd:
+ display.error("No host found")
+ return False
+
+ # defaults
+ module = 'shell'
+ module_args = line
+
+ if forceshell is not True:
+ possible_module, *possible_args = line.split()
+ if module_loader.find_plugin(possible_module):
+ # we found module!
+ module = possible_module
+ if possible_args:
+ module_args = ' '.join(possible_args)
+ else:
+ module_args = ''
+
+ if self.callback:
+ cb = self.callback
+ elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
+ cb = C.DEFAULT_STDOUT_CALLBACK
+ else:
+ cb = 'minimal'
+
+ result = None
+ try:
+ check_raw = module in C._ACTION_ALLOWS_RAW_ARGS
+ task = dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)), timeout=self.task_timeout)
+ play_ds = dict(
+ name="Ansible Shell",
+ hosts=self.cwd,
+ gather_facts='no',
+ tasks=[task],
+ remote_user=self.remote_user,
+ become=self.become,
+ become_user=self.become_user,
+ become_method=self.become_method,
+ check_mode=self.check_mode,
+ diff=self.diff,
+ collections=self.collections,
+ )
+ play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
+ except Exception as e:
+ display.error(u"Unable to build command: %s" % to_text(e))
+ return False
+
+ try:
+ # now create a task queue manager to execute the play
+ self._tqm = None
+ try:
+ self._tqm = TaskQueueManager(
+ inventory=self.inventory,
+ variable_manager=self.variable_manager,
+ loader=self.loader,
+ passwords=self.passwords,
+ stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=False,
+ forks=self.forks,
+ )
+
+ result = self._tqm.run(play)
+ display.debug(result)
+ finally:
+ if self._tqm:
+ self._tqm.cleanup()
+ if self.loader:
+ self.loader.cleanup_all_tmp_files()
+
+ if result is None:
+ display.error("No hosts found")
+ return False
+ except KeyboardInterrupt:
+ display.error('User interrupted execution')
+ return False
+ except Exception as e:
+ if self.verbosity >= 3:
+ import traceback
+ display.v(traceback.format_exc())
+ display.error(to_text(e))
+ return False
+
+ def emptyline(self):
+ return
+
+ def do_shell(self, arg):
+ """
+ You can run shell commands through the shell module.
+
+ eg.:
+ shell ps uax | grep java | wc -l
+ shell killall python
+ shell halt -n
+
+ You can use the ! to force the shell module. eg.:
+ !ps aux | grep java | wc -l
+ """
+ self.default(arg, True)
+
+ def help_shell(self):
+ display.display("You can run shell commands through the shell module.")
+
+ def do_forks(self, arg):
+ """Set the number of forks"""
+ if arg:
+ try:
+ forks = int(arg)
+            except (TypeError, ValueError):
+                # int() raises ValueError for non-numeric input; bail out so 'forks' is never unbound
+                display.error('Invalid argument for "forks"')
+                self.usage_forks()
+                return
+
+ if forks > 0:
+ self.forks = forks
+ self.set_prompt()
+
+ else:
+ display.display('forks must be greater than or equal to 1')
+ else:
+ self.usage_forks()
+
+ def help_forks(self):
+ display.display("Set the number of forks to use per task")
+ self.usage_forks()
+
+ def usage_forks(self):
+ display.display('Usage: forks <number>')
+
+ do_serial = do_forks
+ help_serial = help_forks
+
+ def do_collections(self, arg):
+ """Set list of collections for 'short name' usage"""
+ if arg in ('', 'none'):
+ self.collections = None
+ elif not arg:
+ self.usage_collections()
+ else:
+ collections = arg.split(',')
+ for collection in collections:
+ if self.collections is None:
+ self.collections = []
+ self.collections.append(collection.strip())
+
+ if self.collections:
+ display.v('Collections name search is set to: %s' % ', '.join(self.collections))
+ else:
+ display.v('Collections name search is using defaults')
+
+ def help_collections(self):
+ display.display("Set the collection name search path when using short names for plugins")
+ self.usage_collections()
+
+ def usage_collections(self):
+ display.display('Usage: collections <collection1>[, <collection2> ...]\n Use empty quotes or "none" to reset to default.\n')
+
+ def do_verbosity(self, arg):
+ """Set verbosity level"""
+ if not arg:
+ display.display('Usage: verbosity <number>')
+ else:
+ try:
+ display.verbosity = int(arg)
+ display.v('verbosity level set to %s' % arg)
+ except (TypeError, ValueError) as e:
+ display.error('The verbosity must be a valid integer: %s' % to_text(e))
+
+ def help_verbosity(self):
+ display.display("Set the verbosity level, equivalent to -v for 1 and -vvvv for 4.")
+
+ def do_cd(self, arg):
+ """
+ Change active host/group. You can use hosts patterns as well eg.:
+ cd webservers
+ cd webservers:dbservers
+ cd webservers:!phoenix
+ cd webservers:&staging
+ cd webservers:dbservers:&staging:!phoenix
+ """
+ if not arg:
+ self.cwd = '*'
+ elif arg in '/*':
+ self.cwd = 'all'
+ elif self.inventory.get_hosts(arg):
+ self.cwd = arg
+ else:
+ display.display("no host matched")
+
+ self.set_prompt()
+
+ def help_cd(self):
+ display.display("Change active host/group. ")
+ self.usage_cd()
+
+ def usage_cd(self):
+ display.display("Usage: cd <group>|<host>|<host pattern>")
+
+ def do_list(self, arg):
+ """List the hosts in the current group"""
+ if not arg:
+ for host in self.selected:
+ display.display(host.name)
+ elif arg == 'groups':
+ for group in self.groups:
+ display.display(group)
+ else:
+ display.error('Invalid option passed to "list"')
+ self.help_list()
+
+ def help_list(self):
+ display.display("List the hosts in the current group or a list of groups if you add 'groups'.")
+
+ def do_become(self, arg):
+ """Toggle whether plays run with become"""
+ if arg:
+ self.become = boolean(arg, strict=False)
+ display.v("become changed to %s" % self.become)
+ self.set_prompt()
+ else:
+ display.display("Please specify become value, e.g. `become yes`")
+
+ def help_become(self):
+ display.display("Toggle whether the tasks are run with become")
+
+ def do_remote_user(self, arg):
+ """Given a username, set the remote user plays are run by"""
+ if arg:
+ self.remote_user = arg
+ self.set_prompt()
+ else:
+ display.display("Please specify a remote user, e.g. `remote_user root`")
+
+ def help_remote_user(self):
+ display.display("Set the user for use as login to the remote target")
+
+ def do_become_user(self, arg):
+ """Given a username, set the user that plays are run by when using become"""
+ if arg:
+ self.become_user = arg
+ else:
+ display.display("Please specify a user, e.g. `become_user jenkins`")
+ display.v("Current user is %s" % self.become_user)
+ self.set_prompt()
+
+ def help_become_user(self):
+ display.display("Set the user for use with privilege escalation (which remote user attempts to 'become' when become is enabled)")
+
+ def do_become_method(self, arg):
+ """Given a become_method, set the privilege escalation method when using become"""
+ if arg:
+ self.become_method = arg
+ display.v("become_method changed to %s" % self.become_method)
+ else:
+ display.display("Please specify a become_method, e.g. `become_method su`")
+ display.v("Current become_method is %s" % self.become_method)
+
+ def help_become_method(self):
+ display.display("Set the privilege escalation plugin to use when become is enabled")
+
+ def do_check(self, arg):
+ """Toggle whether plays run with check mode"""
+ if arg:
+ self.check_mode = boolean(arg, strict=False)
+ display.display("check mode changed to %s" % self.check_mode)
+ else:
+ display.display("Please specify check mode value, e.g. `check yes`")
+ display.v("check mode is currently %s." % self.check_mode)
+
+ def help_check(self):
+ display.display("Toggle check_mode for the tasks")
+
+ def do_diff(self, arg):
+ """Toggle whether plays run with diff"""
+ if arg:
+ self.diff = boolean(arg, strict=False)
+ display.display("diff mode changed to %s" % self.diff)
+ else:
+ display.display("Please specify a diff value , e.g. `diff yes`")
+ display.v("diff mode is currently %s" % self.diff)
+
+ def help_diff(self):
+ display.display("Toggle diff output for the tasks")
+
+ def do_timeout(self, arg):
+ """Set the timeout"""
+ if arg:
+ try:
+ timeout = int(arg)
+ if timeout < 0:
+ display.error('The timeout must be greater than or equal to 1, use 0 to disable')
+ else:
+ self.task_timeout = timeout
+ except (TypeError, ValueError) as e:
+ display.error('The timeout must be a valid positive integer, or 0 to disable: %s' % to_text(e))
+ else:
+ self.usage_timeout()
+
+ def help_timeout(self):
+ display.display("Set task timeout in seconds")
+ self.usage_timeout()
+
+ def usage_timeout(self):
+ display.display('Usage: timeout <seconds>')
+
+ def do_exit(self, args):
+ """Exits from the console"""
+ sys.stdout.write('\nAnsible-console was exited.\n')
+ return -1
+
+ def help_exit(self):
+ display.display("LEAVE!")
+
+ do_EOF = do_exit
+ help_EOF = help_exit
+
+ def helpdefault(self, module_name):
+ if module_name:
+ in_path = module_loader.find_plugin(module_name)
+ if in_path:
+ oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader)
+ if oc:
+ display.display(oc['short_description'])
+ display.display('Parameters:')
+ for opt in oc['options'].keys():
+ display.display(' ' + stringc(opt, self.NORMAL_PROMPT) + ' ' + oc['options'][opt]['description'][0])
+ else:
+ display.error('No documentation found for %s.' % module_name)
+ else:
+ display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)
+
+ def help_help(self):
+ display.warning("Don't be redundant!")
+
+ def complete_cd(self, text, line, begidx, endidx):
+ mline = line.partition(' ')[2]
+ offs = len(mline) - len(text)
+
+ if self.cwd in ('all', '*', '\\'):
+ completions = self.hosts + self.groups
+ else:
+ completions = [x.name for x in self.inventory.list_hosts(self.cwd)]
+
+ return [to_native(s)[offs:] for s in completions if to_native(s).startswith(to_native(mline))]
+
+ def completedefault(self, text, line, begidx, endidx):
+ if line.split()[0] in self.list_modules():
+ mline = line.split(' ')[-1]
+ offs = len(mline) - len(text)
+ completions = self.module_args(line.split()[0])
+
+ return [s[offs:] + '=' for s in completions if s.startswith(mline)]
+
+ def module_args(self, module_name):
+ in_path = module_loader.find_plugin(module_name)
+ oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader, is_module=True)
+ return list(oc['options'].keys())
+
+ def run(self):
+
+ super(ConsoleCLI, self).run()
+
+ sshpass = None
+ becomepass = None
+
+ # hosts
+ self.pattern = context.CLIARGS['pattern']
+ self.cwd = self.pattern
+
+ # Defaults from the command line
+ self.remote_user = context.CLIARGS['remote_user']
+ self.become = context.CLIARGS['become']
+ self.become_user = context.CLIARGS['become_user']
+ self.become_method = context.CLIARGS['become_method']
+ self.check_mode = context.CLIARGS['check']
+ self.diff = context.CLIARGS['diff']
+ self.forks = context.CLIARGS['forks']
+ self.task_timeout = context.CLIARGS['task_timeout']
+
+ # set module path if needed
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ module_loader.add_directory(path)
+
+        # dynamically add 'canonical' modules as commands; aliases could be used and dynamically loaded
+ self.modules = self.list_modules()
+ for module in self.modules:
+ setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
+ setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))
+
+ (sshpass, becomepass) = self.ask_passwords()
+ self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ self.loader, self.inventory, self.variable_manager = self._play_prereqs()
+
+ hosts = self.get_host_list(self.inventory, context.CLIARGS['subset'], self.pattern)
+
+ self.groups = self.inventory.list_groups()
+ self.hosts = [x.name for x in hosts]
+
+ # This hack is to work around readline issues on a mac:
+ # http://stackoverflow.com/a/7116997/541202
+ if 'libedit' in readline.__doc__:
+ readline.parse_and_bind("bind ^I rl_complete")
+ else:
+ readline.parse_and_bind("tab: complete")
+
+ histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
+ try:
+ readline.read_history_file(histfile)
+ except IOError:
+ pass
+
+ atexit.register(readline.write_history_file, histfile)
+ self.set_prompt()
+ self.cmdloop()
+
+ def __getattr__(self, name):
+        ''' handle attribute lookup misses by dynamically populating a module function if a module matching the name exists '''
+ attr = None
+
+ if name.startswith('do_'):
+ module = name.replace('do_', '')
+ if module_loader.find_plugin(module):
+ setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))
+ attr = object.__getattr__(self, name)
+ elif name.startswith('help_'):
+ module = name.replace('help_', '')
+ if module_loader.find_plugin(module):
+ setattr(self, name, lambda module=module: self.helpdefault(module))
+ attr = object.__getattr__(self, name)
+
+ if attr is None:
+ raise AttributeError(f"{self.__class__} does not have a {name} attribute")
+
+ return attr
+
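Both the command-registration loop in run() and __getattr__ above rely on `module=module` default arguments to pin the loop variable at definition time; without them, every generated handler would dispatch to the last module seen. The pitfall in isolation:

    bad = [lambda: mod for mod in ('ping', 'copy')]
    good = [lambda mod=mod: mod for mod in ('ping', 'copy')]
    assert [f() for f in bad] == ['copy', 'copy']    # late binding: all share the final value
    assert [f() for f in good] == ['ping', 'copy']   # default arg captures each iteration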
+
+def main(args=None):
+ ConsoleCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-doc b/bin/ansible-doc
new file mode 100755
index 0000000..8036530
--- /dev/null
+++ b/bin/ansible-doc
@@ -0,0 +1,1393 @@
+#!/usr/bin/env python
+# Copyright: (c) 2014, James Tanner <tanner.jc@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import pkgutil
+import os
+import os.path
+import re
+import textwrap
+import traceback
+
+import ansible.plugins.loader as plugin_loader
+
+from pathlib import Path
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.collections.list import list_collection_dirs
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError, AnsiblePluginNotFound
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.common.json import json_dump
+from ansible.module_utils.common.yaml import yaml_dump
+from ansible.module_utils.compat import importlib
+from ansible.module_utils.six import string_types
+from ansible.parsing.plugin_docs import read_docstub
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.list import list_plugins
+from ansible.plugins.loader import action_loader, fragment_loader
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_plugin_docs, get_docstring, get_versioned_doclink
+
+display = Display()
+
+
+TARGET_OPTIONS = C.DOCUMENTABLE_PLUGINS + ('role', 'keyword',)
+PB_OBJECTS = ['Play', 'Role', 'Block', 'Task']
+PB_LOADED = {}
+SNIPPETS = ['inventory', 'lookup', 'module']
+
+
+def add_collection_plugins(plugin_list, plugin_type, coll_filter=None):
+ display.deprecated("add_collection_plugins method, use ansible.plugins.list functions instead.", version='2.17')
+ plugin_list.update(list_plugins(plugin_type, coll_filter))
+
+
+def jdump(text):
+ try:
+ display.display(json_dump(text))
+ except TypeError as e:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))
+
+
+class RoleMixin(object):
+ """A mixin containing all methods relevant to role argument specification functionality.
+
+ Note: The methods for actual display of role data are not present here.
+ """
+
+ # Potential locations of the role arg spec file in the meta subdir, with main.yml
+ # having the lowest priority.
+ ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ["main" + e for e in C.YAML_FILENAME_EXTENSIONS]
+
+ def _load_argspec(self, role_name, collection_path=None, role_path=None):
+ """Load the role argument spec data from the source file.
+
+ :param str role_name: The name of the role for which we want the argspec data.
+ :param str collection_path: Path to the collection containing the role. This
+ will be None for standard roles.
+ :param str role_path: Path to the standard role. This will be None for
+ collection roles.
+
+        We support two files containing the role arg spec data: either meta/main.yml
+        or meta/argument_specs.yml. The argument_specs.yml file takes precedence
+        over the meta/main.yml file, if it exists. Data is NOT combined between
+        the two files.
+
+ :returns: A dict of all data underneath the ``argument_specs`` top-level YAML
+ key in the argspec data file. Empty dict is returned if there is no data.
+ """
+
+ if collection_path:
+ meta_path = os.path.join(collection_path, 'roles', role_name, 'meta')
+ elif role_path:
+ meta_path = os.path.join(role_path, 'meta')
+ else:
+ raise AnsibleError("A path is required to load argument specs for role '%s'" % role_name)
+
+ path = None
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(meta_path, specfile)
+ if os.path.exists(full_path):
+ path = full_path
+ break
+
+ if path is None:
+ return {}
+
+ try:
+ with open(path, 'r') as f:
+ data = from_yaml(f.read(), file_name=path)
+ if data is None:
+ data = {}
+ return data.get('argument_specs', {})
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("An error occurred while trying to read the file '%s': %s" % (path, to_native(e)), orig_exc=e)
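+
+    # Illustrative (hypothetical role name and path): for a standalone role the
+    # spec is read from <role_path>/meta/argument_specs.yml when present,
+    # otherwise from meta/main.yml:
+    #
+    #   self._load_argspec('webserver', role_path='/etc/ansible/roles/webserver')
+    #   # -> {'main': {'short_description': '...', 'options': {...}}}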
+
+ def _find_all_normal_roles(self, role_paths, name_filters=None):
+ """Find all non-collection roles that have an argument spec file.
+
+ Note that argument specs do not actually need to exist within the spec file.
+
+ :param role_paths: A tuple of one or more role paths. When a role with the same name
+ is found in multiple paths, only the first-found role is returned.
+ :param name_filters: A tuple of one or more role names used to filter the results.
+
+ :returns: A set of tuples consisting of: role name, full role path
+ """
+ found = set()
+ found_names = set()
+
+ for path in role_paths:
+ if not os.path.isdir(path):
+ continue
+
+ # Check each subdir for an argument spec file
+ for entry in os.listdir(path):
+ role_path = os.path.join(path, entry)
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(role_path, 'meta', specfile)
+ if os.path.exists(full_path):
+ if name_filters is None or entry in name_filters:
+ if entry not in found_names:
+ found.add((entry, role_path))
+ found_names.add(entry)
+ # select first-found
+ break
+ return found
+
+ def _find_all_collection_roles(self, name_filters=None, collection_filter=None):
+ """Find all collection roles with an argument spec file.
+
+ Note that argument specs do not actually need to exist within the spec file.
+
+ :param name_filters: A tuple of one or more role names used to filter the results. These
+ might be fully qualified with the collection name (e.g., community.general.roleA)
+ or not (e.g., roleA).
+
+ :param collection_filter: A string containing the FQCN of a collection which will be
+ used to limit results. This filter will take precedence over the name_filters.
+
+ :returns: A set of tuples consisting of: role name, collection name, collection path
+ """
+ found = set()
+ b_colldirs = list_collection_dirs(coll_filter=collection_filter)
+ for b_path in b_colldirs:
+ path = to_text(b_path, errors='surrogate_or_strict')
+ collname = _get_collection_name_from_path(b_path)
+
+ roles_dir = os.path.join(path, 'roles')
+ if os.path.exists(roles_dir):
+ for entry in os.listdir(roles_dir):
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(roles_dir, entry, 'meta', specfile)
+ if os.path.exists(full_path):
+ if name_filters is None:
+ found.add((entry, collname, path))
+ else:
+ # Name filters might contain a collection FQCN or not.
+ for fqcn in name_filters:
+ if len(fqcn.split('.')) == 3:
+ (ns, col, role) = fqcn.split('.')
+ if '.'.join([ns, col]) == collname and entry == role:
+ found.add((entry, collname, path))
+ elif fqcn == entry:
+ found.add((entry, collname, path))
+ break
+ return found
+
+ def _build_summary(self, role, collection, argspec):
+ """Build a summary dict for a role.
+
+ Returns a simplified role arg spec containing only the role entry points and their
+ short descriptions, and the role collection name (if applicable).
+
+ :param role: The simple role name.
+ :param collection: The collection containing the role (None or empty string if N/A).
+ :param argspec: The complete role argspec data dict.
+
+ :returns: A tuple with the FQCN role name and a summary dict.
+ """
+ if collection:
+ fqcn = '.'.join([collection, role])
+ else:
+ fqcn = role
+ summary = {}
+ summary['collection'] = collection
+ summary['entry_points'] = {}
+ for ep in argspec.keys():
+ entry_spec = argspec[ep] or {}
+ summary['entry_points'][ep] = entry_spec.get('short_description', '')
+ return (fqcn, summary)
+
+ def _build_doc(self, role, path, collection, argspec, entry_point):
+ if collection:
+ fqcn = '.'.join([collection, role])
+ else:
+ fqcn = role
+ doc = {}
+ doc['path'] = path
+ doc['collection'] = collection
+ doc['entry_points'] = {}
+ for ep in argspec.keys():
+ if entry_point is None or ep == entry_point:
+ entry_spec = argspec[ep] or {}
+ doc['entry_points'][ep] = entry_spec
+
+ # If we didn't add any entry points (b/c of filtering), ignore this entry.
+ if len(doc['entry_points'].keys()) == 0:
+ doc = None
+
+ return (fqcn, doc)
+
+ def _create_role_list(self, fail_on_errors=True):
+ """Return a dict describing the listing of all roles with arg specs.
+
+        :param fail_on_errors: When set to False, include an 'error' entry in the result instead of raising errors.
+
+ :returns: A dict indexed by role name, with 'collection' and 'entry_points' keys per role.
+
+ Example return:
+
+ results = {
+ 'roleA': {
+ 'collection': '',
+ 'entry_points': {
+ 'main': 'Short description for main'
+ }
+ },
+ 'a.b.c.roleB': {
+ 'collection': 'a.b.c',
+ 'entry_points': {
+ 'main': 'Short description for main',
+ 'alternate': 'Short description for alternate entry point'
+                }
+            },
+            'x.y.z.roleB': {
+ 'collection': 'x.y.z',
+ 'entry_points': {
+ 'main': 'Short description for main',
+ }
+ },
+ }
+ """
+ roles_path = self._get_roles_path()
+ collection_filter = self._get_collection_filter()
+ if not collection_filter:
+ roles = self._find_all_normal_roles(roles_path)
+ else:
+ roles = []
+ collroles = self._find_all_collection_roles(collection_filter=collection_filter)
+
+ result = {}
+
+ for role, role_path in roles:
+ try:
+ argspec = self._load_argspec(role, role_path=role_path)
+ fqcn, summary = self._build_summary(role, '', argspec)
+ result[fqcn] = summary
+ except Exception as e:
+ if fail_on_errors:
+ raise
+ result[role] = {
+ 'error': 'Error while loading role argument spec: %s' % to_native(e),
+ }
+
+ for role, collection, collection_path in collroles:
+ try:
+ argspec = self._load_argspec(role, collection_path=collection_path)
+ fqcn, summary = self._build_summary(role, collection, argspec)
+ result[fqcn] = summary
+ except Exception as e:
+ if fail_on_errors:
+ raise
+ result['%s.%s' % (collection, role)] = {
+ 'error': 'Error while loading role argument spec: %s' % to_native(e),
+ }
+
+ return result
+
+ def _create_role_doc(self, role_names, entry_point=None, fail_on_errors=True):
+ """
+ :param role_names: A tuple of one or more role names.
+ :param entry_point: A role entry point name for filtering.
+ :param fail_on_errors: When set to False, include errors in the JSON output instead of raising errors
+
+ :returns: A dict indexed by role name, with 'collection', 'entry_points', and 'path' keys per role.
+ """
+ roles_path = self._get_roles_path()
+ roles = self._find_all_normal_roles(roles_path, name_filters=role_names)
+ collroles = self._find_all_collection_roles(name_filters=role_names)
+
+ result = {}
+
+ for role, role_path in roles:
+ try:
+ argspec = self._load_argspec(role, role_path=role_path)
+ fqcn, doc = self._build_doc(role, role_path, '', argspec, entry_point)
+ if doc:
+ result[fqcn] = doc
+ except Exception as e: # pylint:disable=broad-except
+ result[role] = {
+ 'error': 'Error while processing role: %s' % to_native(e),
+ }
+
+ for role, collection, collection_path in collroles:
+ try:
+ argspec = self._load_argspec(role, collection_path=collection_path)
+ fqcn, doc = self._build_doc(role, collection_path, collection, argspec, entry_point)
+ if doc:
+ result[fqcn] = doc
+ except Exception as e: # pylint:disable=broad-except
+ result['%s.%s' % (collection, role)] = {
+ 'error': 'Error while processing role: %s' % to_native(e),
+ }
+
+ return result
+
+
+class DocCLI(CLI, RoleMixin):
+    ''' displays information on plugins installed in Ansible libraries.
+ It displays a terse listing of plugins and their short descriptions,
+ provides a printout of their DOCUMENTATION strings,
+ and it can create a short "snippet" which can be pasted into a playbook. '''
+
+ name = 'ansible-doc'
+
+ # default ignore list for detailed views
+ IGNORE = ('module', 'docuri', 'version_added', 'version_added_collection', 'short_description', 'now_date', 'plainexamples', 'returndocs', 'collection')
+
+ # Warning: If you add more elements here, you also need to add it to the docsite build (in the
+ # ansible-community/antsibull repo)
+ _ITALIC = re.compile(r"\bI\(([^)]+)\)")
+ _BOLD = re.compile(r"\bB\(([^)]+)\)")
+ _MODULE = re.compile(r"\bM\(([^)]+)\)")
+ _LINK = re.compile(r"\bL\(([^)]+), *([^)]+)\)")
+ _URL = re.compile(r"\bU\(([^)]+)\)")
+ _REF = re.compile(r"\bR\(([^)]+), *([^)]+)\)")
+ _CONST = re.compile(r"\bC\(([^)]+)\)")
+ _RULER = re.compile(r"\bHORIZONTALLINE\b")
+
+ # rst specific
+ _RST_NOTE = re.compile(r".. note::")
+ _RST_SEEALSO = re.compile(r".. seealso::")
+ _RST_ROLES = re.compile(r":\w+?:`")
+ _RST_DIRECTIVES = re.compile(r".. \w+?::")
+
+ def __init__(self, args):
+
+ super(DocCLI, self).__init__(args)
+ self.plugin_list = set()
+
+ @classmethod
+ def find_plugins(cls, path, internal, plugin_type, coll_filter=None):
+ display.deprecated("find_plugins method as it is incomplete/incorrect. use ansible.plugins.list functions instead.", version='2.17')
+ return list_plugins(plugin_type, coll_filter, [path]).keys()
+
+ @classmethod
+ def tty_ify(cls, text):
+
+ # general formatting
+ t = cls._ITALIC.sub(r"`\1'", text) # I(word) => `word'
+ t = cls._BOLD.sub(r"*\1*", t) # B(word) => *word*
+ t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
+ t = cls._URL.sub(r"\1", t) # U(word) => word
+ t = cls._LINK.sub(r"\1 <\2>", t) # L(word, url) => word <url>
+ t = cls._REF.sub(r"\1", t) # R(word, sphinx-ref) => word
+ t = cls._CONST.sub(r"`\1'", t) # C(word) => `word'
+ t = cls._RULER.sub("\n{0}\n".format("-" * 13), t) # HORIZONTALLINE => -------
+
+ # remove rst
+ t = cls._RST_SEEALSO.sub(r"See also:", t) # seealso to See also:
+ t = cls._RST_NOTE.sub(r"Note:", t) # .. note:: to note:
+        t = cls._RST_ROLES.sub(r"`", t)  # remove :ref: and other tags, keep backtick to match the ending one
+ t = cls._RST_DIRECTIVES.sub(r"", t) # remove .. stuff:: in general
+
+ return t
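+
+    # Illustrative (follows directly from the substitutions above):
+    # documentation markup is flattened for terminal display, e.g.
+    #
+    #   DocCLI.tty_ify("I(word) B(word) M(ansible.builtin.copy) U(https://x.io)")
+    #   # -> "`word' *word* [ansible.builtin.copy] https://x.io"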
+
+ def init_parser(self):
+
+ coll_filter = 'A supplied argument will be used for filtering, can be a namespace or full collection name.'
+
+ super(DocCLI, self).init_parser(
+ desc="plugin documentation tool",
+ epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
+ )
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+
+ # targets
+ self.parser.add_argument('args', nargs='*', help='Plugin', metavar='plugin')
+
+ self.parser.add_argument("-t", "--type", action="store", default='module', dest='type',
+ help='Choose which plugin type (defaults to "module"). '
+                                      'Available plugin types are: {0}'.format(TARGET_OPTIONS),
+ choices=TARGET_OPTIONS)
+
+ # formatting
+ self.parser.add_argument("-j", "--json", action="store_true", default=False, dest='json_format',
+ help='Change output into json format.')
+
+ # TODO: warn if not used with -t roles
+ # role-specific options
+ self.parser.add_argument("-r", "--roles-path", dest='roles_path', default=C.DEFAULT_ROLES_PATH,
+ type=opt_help.unfrack_path(pathsep=True),
+ action=opt_help.PrependListAction,
+ help='The path to the directory containing your roles.')
+
+ # modifiers
+ exclusive = self.parser.add_mutually_exclusive_group()
+ # TODO: warn if not used with -t roles
+ exclusive.add_argument("-e", "--entry-point", dest="entry_point",
+ help="Select the entry point for role(s).")
+
+ # TODO: warn with --json as it is incompatible
+ exclusive.add_argument("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
+ help='Show playbook snippet for these plugin types: %s' % ', '.join(SNIPPETS))
+
+ # TODO: warn when arg/plugin is passed
+ exclusive.add_argument("-F", "--list_files", action="store_true", default=False, dest="list_files",
+ help='Show plugin names and their source files without summaries (implies --list). %s' % coll_filter)
+ exclusive.add_argument("-l", "--list", action="store_true", default=False, dest='list_dir',
+ help='List available plugins. %s' % coll_filter)
+ exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump',
+ help='**For internal use only** Dump json metadata for all entries, ignores other options.')
+
+ self.parser.add_argument("--no-fail-on-errors", action="store_true", default=False, dest='no_fail_on_errors',
+ help='**For internal use only** Only used for --metadata-dump. '
+ 'Do not fail on errors. Report the error message in the JSON instead.')
+
+ def post_process_args(self, options):
+ options = super(DocCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+
+ return options
+
+ def display_plugin_list(self, results):
+
+ # format for user
+ displace = max(len(x) for x in results.keys())
+ linelimit = display.columns - displace - 5
+ text = []
+ deprecated = []
+
+ # format display per option
+ if context.CLIARGS['list_files']:
+ # list plugin file names
+ for plugin in sorted(results.keys()):
+ filename = to_native(results[plugin])
+
+ # handle deprecated for builtin/legacy
+ pbreak = plugin.split('.')
+ if pbreak[-1].startswith('_') and pbreak[0] == 'ansible' and pbreak[1] in ('builtin', 'legacy'):
+ pbreak[-1] = pbreak[-1][1:]
+ plugin = '.'.join(pbreak)
+ deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
+ else:
+ text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
+ else:
+ # list plugin names and short desc
+ for plugin in sorted(results.keys()):
+ desc = DocCLI.tty_ify(results[plugin])
+
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+
+ pbreak = plugin.split('.')
+ if pbreak[-1].startswith('_'): # Handle deprecated # TODO: add mark for deprecated collection plugins
+ pbreak[-1] = pbreak[-1][1:]
+ plugin = '.'.join(pbreak)
+ deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
+ else:
+ text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
+
+ if len(deprecated) > 0:
+ text.append("\nDEPRECATED:")
+ text.extend(deprecated)
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ def _display_available_roles(self, list_json):
+ """Display all roles we can find with a valid argument specification.
+
+ Output is: fqcn role name, entry point, short description
+ """
+ roles = list(list_json.keys())
+ entry_point_names = set()
+ for role in roles:
+ for entry_point in list_json[role]['entry_points'].keys():
+ entry_point_names.add(entry_point)
+
+ max_role_len = 0
+ max_ep_len = 0
+
+ if roles:
+ max_role_len = max(len(x) for x in roles)
+ if entry_point_names:
+ max_ep_len = max(len(x) for x in entry_point_names)
+
+ linelimit = display.columns - max_role_len - max_ep_len - 5
+ text = []
+
+ for role in sorted(roles):
+ for entry_point, desc in list_json[role]['entry_points'].items():
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+ text.append("%-*s %-*s %s" % (max_role_len, role,
+ max_ep_len, entry_point,
+ desc))
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ def _display_role_doc(self, role_json):
+ roles = list(role_json.keys())
+ text = []
+ for role in roles:
+ text += self.get_role_man_text(role, role_json[role])
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ @staticmethod
+ def _list_keywords():
+ return from_yaml(pkgutil.get_data('ansible', 'keyword_desc.yml'))
+
+ @staticmethod
+ def _get_keywords_docs(keys):
+
+ data = {}
+ descs = DocCLI._list_keywords()
+ for key in keys:
+
+ if key.startswith('with_'):
+                # simplify loops, don't want to handle every with_<lookup> combo
+ keyword = 'loop'
+ elif key == 'async':
+                # because 'async' became a reserved word in Python, we had to rename it internally
+ keyword = 'async_val'
+ else:
+ keyword = key
+
+ try:
+                # if there is no description, the KeyError raised here ends this block
+ kdata = {'description': descs[key]}
+
+ # get playbook objects for keyword and use first to get keyword attributes
+ kdata['applies_to'] = []
+ for pobj in PB_OBJECTS:
+ if pobj not in PB_LOADED:
+ obj_class = 'ansible.playbook.%s' % pobj.lower()
+ loaded_class = importlib.import_module(obj_class)
+ PB_LOADED[pobj] = getattr(loaded_class, pobj, None)
+
+ if keyword in PB_LOADED[pobj].fattributes:
+ kdata['applies_to'].append(pobj)
+
+ # we should only need these once
+ if 'type' not in kdata:
+
+ fa = PB_LOADED[pobj].fattributes.get(keyword)
+ if getattr(fa, 'private'):
+ kdata = {}
+ raise KeyError
+
+ kdata['type'] = getattr(fa, 'isa', 'string')
+
+ if keyword.endswith('when') or keyword in ('until',):
+ # TODO: make this a field attribute property,
+                            # would also help with the warnings on {{}} stacking
+ kdata['template'] = 'implicit'
+ elif getattr(fa, 'static'):
+ kdata['template'] = 'static'
+ else:
+ kdata['template'] = 'explicit'
+
+ # those that require no processing
+ for visible in ('alias', 'priority'):
+ kdata[visible] = getattr(fa, visible)
+
+ # remove None keys
+ for k in list(kdata.keys()):
+ if kdata[k] is None:
+ del kdata[k]
+
+ data[key] = kdata
+
+ except (AttributeError, KeyError) as e:
+ display.warning("Skipping Invalid keyword '%s' specified: %s" % (key, to_text(e)))
+ if display.verbosity >= 3:
+ display.verbose(traceback.format_exc())
+
+ return data
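+
+    # Illustrative: keys are normalized before the attribute lookup above, so a
+    # request for 'async' is resolved internally against the 'async_val' field
+    # attribute ('async' is reserved in Python):
+    #
+    #   DocCLI._get_keywords_docs(['async'])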
+
+ def _get_collection_filter(self):
+
+ coll_filter = None
+ if len(context.CLIARGS['args']) == 1:
+ coll_filter = context.CLIARGS['args'][0]
+ if not AnsibleCollectionRef.is_valid_collection_name(coll_filter):
+ raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_filter))
+ elif len(context.CLIARGS['args']) > 1:
+ raise AnsibleOptionsError("Only a single collection filter is supported.")
+
+ return coll_filter
+
+ def _list_plugins(self, plugin_type, content):
+
+ results = {}
+ self.plugins = {}
+ loader = DocCLI._prep_loader(plugin_type)
+
+ coll_filter = self._get_collection_filter()
+ self.plugins.update(list_plugins(plugin_type, coll_filter))
+
+ # get appropriate content depending on option
+ if content == 'dir':
+ results = self._get_plugin_list_descriptions(loader)
+ elif content == 'files':
+ results = {k: self.plugins[k][0] for k in self.plugins.keys()}
+ else:
+ results = {k: {} for k in self.plugins.keys()}
+ self.plugin_list = set() # reset for next iteration
+
+ return results
+
+ def _get_plugins_docs(self, plugin_type, names, fail_ok=False, fail_on_errors=True):
+
+ loader = DocCLI._prep_loader(plugin_type)
+
+ # get the docs for plugins in the command line list
+ plugin_docs = {}
+ for plugin in names:
+ doc = {}
+ try:
+ doc, plainexamples, returndocs, metadata = get_plugin_docs(plugin, plugin_type, loader, fragment_loader, (context.CLIARGS['verbosity'] > 0))
+ except AnsiblePluginNotFound as e:
+ display.warning(to_native(e))
+ continue
+ except Exception as e:
+ if not fail_on_errors:
+ plugin_docs[plugin] = {'error': 'Missing documentation or could not parse documentation: %s' % to_native(e)}
+ continue
+ display.vvv(traceback.format_exc())
+ msg = "%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, to_native(e))
+ if fail_ok:
+ display.warning(msg)
+ else:
+ raise AnsibleError(msg)
+
+ if not doc:
+ # The doc section existed but was empty
+ if not fail_on_errors:
+ plugin_docs[plugin] = {'error': 'No valid documentation found'}
+ continue
+
+ docs = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
+ if not fail_on_errors:
+ # Check whether JSON serialization would break
+ try:
+ json_dump(docs)
+ except Exception as e: # pylint:disable=broad-except
+ plugin_docs[plugin] = {'error': 'Cannot serialize documentation as JSON: %s' % to_native(e)}
+ continue
+
+ plugin_docs[plugin] = docs
+
+ return plugin_docs
+
+ def _get_roles_path(self):
+ '''
+ Add any 'roles' subdir in playbook dir to the roles search path.
+ And as a last resort, add the playbook dir itself. Order being:
+ - 'roles' subdir of playbook dir
+ - DEFAULT_ROLES_PATH (default in cliargs)
+ - playbook dir (basedir)
+        NOTE: This matches the logic in the RoleDefinition._load_role_path() method.
+ '''
+ roles_path = context.CLIARGS['roles_path']
+ if context.CLIARGS['basedir'] is not None:
+ subdir = os.path.join(context.CLIARGS['basedir'], "roles")
+ if os.path.isdir(subdir):
+ roles_path = (subdir,) + roles_path
+ roles_path = roles_path + (context.CLIARGS['basedir'],)
+ return roles_path
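+
+    # Illustrative (hypothetical values): with --playbook-dir /site (where
+    # /site/roles exists) and a configured roles_path of ('/etc/ansible/roles',),
+    # the resulting search order is:
+    #
+    #   ('/site/roles', '/etc/ansible/roles', '/site')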
+
+ @staticmethod
+ def _prep_loader(plugin_type):
+        ''' return a plugin-type-specific loader '''
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+
+ # add to plugin paths from command line
+ if context.CLIARGS['basedir'] is not None:
+ loader.add_directory(context.CLIARGS['basedir'], with_subdir=True)
+
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ loader.add_directory(path)
+
+ # save only top level paths for errors
+ loader._paths = None # reset so we can use subdirs later
+
+ return loader
+
+ def run(self):
+
+ super(DocCLI, self).run()
+
+ basedir = context.CLIARGS['basedir']
+ plugin_type = context.CLIARGS['type'].lower()
+ do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
+ listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
+
+ if context.CLIARGS['list_files']:
+ content = 'files'
+ elif context.CLIARGS['list_dir']:
+ content = 'dir'
+ else:
+ content = None
+
+ docs = {}
+
+ if basedir:
+ AnsibleCollectionConfig.playbook_paths = basedir
+
+ if plugin_type not in TARGET_OPTIONS:
+ raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
+
+ if context.CLIARGS['dump']:
+ # we always dump all types, ignore restrictions
+ ptypes = TARGET_OPTIONS
+ docs['all'] = {}
+ for ptype in ptypes:
+
+ no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
+ if ptype == 'role':
+ roles = self._create_role_list(fail_on_errors=no_fail)
+ docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=no_fail)
+ elif ptype == 'keyword':
+ names = DocCLI._list_keywords()
+ docs['all'][ptype] = DocCLI._get_keywords_docs(names.keys())
+ else:
+ plugin_names = self._list_plugins(ptype, None)
+ docs['all'][ptype] = self._get_plugins_docs(ptype, plugin_names, fail_ok=(ptype in ('test', 'filter')), fail_on_errors=no_fail)
+                # reset list after each type to avoid pollution
+ elif listing:
+ if plugin_type == 'keyword':
+ docs = DocCLI._list_keywords()
+ elif plugin_type == 'role':
+ docs = self._create_role_list()
+ else:
+ docs = self._list_plugins(plugin_type, content)
+ else:
+ # here we require a name
+ if len(context.CLIARGS['args']) == 0:
+ raise AnsibleOptionsError("Missing name(s), incorrect options passed for detailed documentation.")
+
+ if plugin_type == 'keyword':
+ docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
+ elif plugin_type == 'role':
+ docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'])
+ else:
+ # display specific plugin docs
+ docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
+
+ # Display the docs
+ if do_json:
+ jdump(docs)
+ else:
+ text = []
+ if plugin_type in C.DOCUMENTABLE_PLUGINS:
+ if listing and docs:
+ self.display_plugin_list(docs)
+ elif context.CLIARGS['show_snippet']:
+ if plugin_type not in SNIPPETS:
+ raise AnsibleError('Snippets are only available for the following plugin'
+ ' types: %s' % ', '.join(SNIPPETS))
+
+ for plugin, doc_data in docs.items():
+ try:
+ textret = DocCLI.format_snippet(plugin, plugin_type, doc_data['doc'])
+ except ValueError as e:
+ display.warning("Unable to construct a snippet for"
+ " '{0}': {1}".format(plugin, to_text(e)))
+ else:
+ text.append(textret)
+ else:
+ # Some changes to how plain text docs are formatted
+ for plugin, doc_data in docs.items():
+
+ textret = DocCLI.format_plugin_doc(plugin, plugin_type,
+ doc_data['doc'], doc_data['examples'],
+ doc_data['return'], doc_data['metadata'])
+ if textret:
+ text.append(textret)
+ else:
+ display.warning("No valid documentation was retrieved from '%s'" % plugin)
+
+ elif plugin_type == 'role':
+ if context.CLIARGS['list_dir'] and docs:
+ self._display_available_roles(docs)
+ elif docs:
+ self._display_role_doc(docs)
+
+ elif docs:
+ text = DocCLI.tty_ify(DocCLI._dump_yaml(docs))
+
+ if text:
+ DocCLI.pager(''.join(text))
+
+ return 0
+
+ @staticmethod
+ def get_all_plugins_of_type(plugin_type):
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ paths = loader._get_paths_with_context()
+ plugins = {}
+ for path_context in paths:
+ plugins.update(list_plugins(plugin_type))
+ return sorted(plugins.keys())
+
+ @staticmethod
+ def get_plugin_metadata(plugin_type, plugin_name):
+ # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ result = loader.find_plugin_with_context(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
+ if not result.resolved:
+ raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))
+ filename = result.plugin_resolved_path
+ collection_name = result.plugin_resolved_collection
+
+ try:
+ doc, __, __, __ = get_docstring(filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
+ collection_name=collection_name, plugin_type=plugin_type)
+ except Exception:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("%s %s at %s has a documentation formatting error or is missing documentation." % (plugin_type, plugin_name, filename))
+
+ if doc is None:
+ # Removed plugins don't have any documentation
+ return None
+
+ return dict(
+ name=plugin_name,
+ namespace=DocCLI.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
+ description=doc.get('short_description', "UNKNOWN"),
+ version_added=doc.get('version_added', "UNKNOWN")
+ )
+
+ @staticmethod
+ def namespace_from_plugin_filepath(filepath, plugin_name, basedir):
+ if not basedir.endswith('/'):
+ basedir += '/'
+ rel_path = filepath.replace(basedir, '')
+ extension_free = os.path.splitext(rel_path)[0]
+ namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
+ clean_ns = namespace_only.replace('/', '.')
+ if clean_ns == '':
+ clean_ns = None
+
+ return clean_ns
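+
+    # Illustrative (hypothetical paths): the namespace is the plugin's path
+    # relative to the loader package, minus the plugin name and extension:
+    #
+    #   DocCLI.namespace_from_plugin_filepath(
+    #       '/pkg/plugins/modules/cloud/amazon/ec2.py', 'ec2', '/pkg/plugins/modules')
+    #   # -> 'cloud.amazon'        (top-level plugins yield None)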
+
+ @staticmethod
+ def _combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
+ # generate extra data
+ if plugin_type == 'module':
+ # is there corresponding action plugin?
+ if plugin in action_loader:
+ doc['has_action'] = True
+ else:
+ doc['has_action'] = False
+
+ # return everything as one dictionary
+ return {'doc': doc, 'examples': plainexamples, 'return': returndocs, 'metadata': metadata}
+
+ @staticmethod
+ def format_snippet(plugin, plugin_type, doc):
+        ''' return a heavily commented plugin usage snippet to insert into a play '''
+ if plugin_type == 'inventory' and doc.get('options', {}).get('plugin'):
+ # these do not take a yaml config that we can write a snippet for
+            raise ValueError('The {0} inventory plugin does not take a YAML config source'
+                             ' that can be used with the "auto" plugin, so a snippet cannot'
+                             ' be created.'.format(plugin))
+
+ text = []
+
+ if plugin_type == 'lookup':
+ text = _do_lookup_snippet(doc)
+
+ elif 'options' in doc:
+ text = _do_yaml_snippet(doc)
+
+ text.append('')
+ return "\n".join(text)
+
+ @staticmethod
+ def format_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
+ collection_name = doc['collection']
+
+ # TODO: do we really want this?
+ # add_collection_to_versions_and_dates(doc, '(unknown)', is_module=(plugin_type == 'module'))
+ # remove_current_collection_from_versions_and_dates(doc, collection_name, is_module=(plugin_type == 'module'))
+ # remove_current_collection_from_versions_and_dates(
+ # returndocs, collection_name, is_module=(plugin_type == 'module'), return_docs=True)
+
+ # assign from other sections
+ doc['plainexamples'] = plainexamples
+ doc['returndocs'] = returndocs
+ doc['metadata'] = metadata
+
+ try:
+ text = DocCLI.get_man_text(doc, collection_name, plugin_type)
+ except Exception as e:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("Unable to retrieve documentation from '%s' due to: %s" % (plugin, to_native(e)), orig_exc=e)
+
+ return text
+
+ def _get_plugin_list_descriptions(self, loader):
+
+ descs = {}
+ for plugin in self.plugins.keys():
+ # TODO: move to plugin itself i.e: plugin.get_desc()
+ doc = None
+ filename = Path(to_native(self.plugins[plugin][0]))
+ docerror = None
+ try:
+ doc = read_docstub(filename)
+ except Exception as e:
+ docerror = e
+
+            # plugin file was empty or had an error, let's try other options
+ if doc is None:
+                # handle tests/filters that live in a file with a different name
+ base = plugin.split('.')[-1]
+ basefile = filename.with_name(base + filename.suffix)
+ for extension in C.DOC_EXTENSIONS:
+ docfile = basefile.with_suffix(extension)
+ try:
+ if docfile.exists():
+ doc = read_docstub(docfile)
+ except Exception as e:
+ docerror = e
+
+ if docerror:
+ display.warning("%s has a documentation formatting error: %s" % (plugin, docerror))
+ continue
+
+ if not doc or not isinstance(doc, dict):
+ desc = 'UNDOCUMENTED'
+ else:
+ desc = doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip()
+
+ descs[plugin] = desc
+
+ return descs
+
+ @staticmethod
+ def print_paths(finder):
+ ''' Returns a string suitable for printing of the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in finder._get_paths(subdirs=False):
+ i = to_text(i, errors='surrogate_or_strict')
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ @staticmethod
+ def _dump_yaml(struct, flow_style=False):
+ return yaml_dump(struct, default_flow_style=flow_style, default_style="''", Dumper=AnsibleDumper).rstrip('\n')
+
+ @staticmethod
+ def _indent_lines(text, indent):
+ return DocCLI.tty_ify('\n'.join([indent + line for line in text.split('\n')]))
+
+ @staticmethod
+ def _format_version_added(version_added, version_added_collection=None):
+ if version_added_collection == 'ansible.builtin':
+ version_added_collection = 'ansible-core'
+ # In ansible-core, version_added can be 'historical'
+ if version_added == 'historical':
+ return 'historical'
+ if version_added_collection:
+ version_added = '%s of %s' % (version_added, version_added_collection)
+ return 'version %s' % (version_added, )
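+
+    # Illustrative (follows directly from the branches above):
+    #
+    #   DocCLI._format_version_added('2.10')                      # -> 'version 2.10'
+    #   DocCLI._format_version_added('1.2.0', 'community.crypto') # -> 'version 1.2.0 of community.crypto'
+    #   DocCLI._format_version_added('2.10', 'ansible.builtin')   # -> 'version 2.10 of ansible-core'
+    #   DocCLI._format_version_added('historical')                # -> 'historical'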
+
+ @staticmethod
+ def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent=''):
+
+ for o in sorted(fields):
+ # Create a copy so we don't modify the original (in case YAML anchors have been used)
+ opt = dict(fields[o])
+
+ # required is used as indicator and removed
+ required = opt.pop('required', False)
+ if not isinstance(required, bool):
+ raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
+ if required:
+ opt_leadin = "="
+ else:
+ opt_leadin = "-"
+
+ text.append("%s%s %s" % (base_indent, opt_leadin, o))
+
+            # description is specially formatted and can be either a string or a list of strings
+ if 'description' not in opt:
+ raise AnsibleError("All (sub-)options and return values must have a 'description' field")
+ if is_sequence(opt['description']):
+ for entry_idx, entry in enumerate(opt['description'], 1):
+ if not isinstance(entry, string_types):
+ raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
+ text.append(textwrap.fill(DocCLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ else:
+ if not isinstance(opt['description'], string_types):
+ raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
+ text.append(textwrap.fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ del opt['description']
+
+ suboptions = []
+ for subkey in ('options', 'suboptions', 'contains', 'spec'):
+ if subkey in opt:
+ suboptions.append((subkey, opt.pop(subkey)))
+
+ if not required and not return_values and 'default' not in opt:
+ opt['default'] = None
+
+ # sanitize config items
+ conf = {}
+ for config in ('env', 'ini', 'yaml', 'vars', 'keyword'):
+ if config in opt and opt[config]:
+ # Create a copy so we don't modify the original (in case YAML anchors have been used)
+ conf[config] = [dict(item) for item in opt.pop(config)]
+ for ignore in DocCLI.IGNORE:
+ for item in conf[config]:
+ if ignore in item:
+ del item[ignore]
+
+            # reformat cli options
+ if 'cli' in opt and opt['cli']:
+ conf['cli'] = []
+ for cli in opt['cli']:
+ if 'option' not in cli:
+ conf['cli'].append({'name': cli['name'], 'option': '--%s' % cli['name'].replace('_', '-')})
+ else:
+ conf['cli'].append(cli)
+ del opt['cli']
+
+ # add custom header for conf
+ if conf:
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({'set_via': conf}), opt_indent))
+
+ # these we handle at the end of generic option processing
+ version_added = opt.pop('version_added', None)
+ version_added_collection = opt.pop('version_added_collection', None)
+
+ # general processing for options
+ for k in sorted(opt):
+ if k.startswith('_'):
+ continue
+
+ if is_sequence(opt[k]):
+ text.append(DocCLI._indent_lines('%s: %s' % (k, DocCLI._dump_yaml(opt[k], flow_style=True)), opt_indent))
+ else:
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k: opt[k]}), opt_indent))
+
+ if version_added:
+ text.append("%sadded in: %s\n" % (opt_indent, DocCLI._format_version_added(version_added, version_added_collection)))
+
+ for subkey, subdata in suboptions:
+ text.append('')
+ text.append("%s%s:\n" % (opt_indent, subkey.upper()))
+ DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent)
+ if not suboptions:
+ text.append('')
+
+ def get_role_man_text(self, role, role_json):
+ '''Generate text for the supplied role suitable for display.
+
+ This is similar to get_man_text(), but roles are different enough that we have
+ a separate method for formatting their display.
+
+ :param role: The role name.
+ :param role_json: The JSON for the given role as returned from _create_role_doc().
+
+        :returns: An array of text suitable for display on screen.
+ '''
+ text = []
+ opt_indent = " "
+ pad = display.columns * 0.20
+ limit = max(display.columns - int(pad), 70)
+
+ text.append("> %s (%s)\n" % (role.upper(), role_json.get('path')))
+
+ for entry_point in role_json['entry_points']:
+ doc = role_json['entry_points'][entry_point]
+
+ if doc.get('short_description'):
+ text.append("ENTRY POINT: %s - %s\n" % (entry_point, doc.get('short_description')))
+ else:
+ text.append("ENTRY POINT: %s\n" % entry_point)
+
+ if doc.get('description'):
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc['description'])
+ else:
+ desc = doc['description']
+
+ text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc),
+ limit, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+ if doc.get('options'):
+ text.append("OPTIONS (= is mandatory):\n")
+ DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ text.append('')
+
+ if doc.get('attributes'):
+ text.append("ATTRIBUTES:\n")
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc.pop('attributes')), opt_indent))
+ text.append('')
+
+ # generic elements we will handle identically
+ for k in ('author',):
+ if k not in doc:
+ continue
+ if isinstance(doc[k], string_types):
+ text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]),
+ limit - (len(k) + 2), subsequent_indent=opt_indent)))
+ elif isinstance(doc[k], (list, tuple)):
+ text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
+ else:
+                # use empty indent since this affects the start of the yaml doc, not its keys
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
+ text.append('')
+
+ return text
+
+ @staticmethod
+ def get_man_text(doc, collection_name='', plugin_type=''):
+ # Create a copy so we don't modify the original
+ doc = dict(doc)
+
+ DocCLI.IGNORE = DocCLI.IGNORE + (context.CLIARGS['type'],)
+ opt_indent = " "
+ text = []
+ pad = display.columns * 0.20
+ limit = max(display.columns - int(pad), 70)
+
+ plugin_name = doc.get(context.CLIARGS['type'], doc.get('name')) or doc.get('plugin_type') or plugin_type
+ if collection_name:
+ plugin_name = '%s.%s' % (collection_name, plugin_name)
+
+ text.append("> %s (%s)\n" % (plugin_name.upper(), doc.pop('filename')))
+
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc.pop('description'))
+ else:
+ desc = doc.pop('description')
+
+ text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+
+ if 'version_added' in doc:
+ version_added = doc.pop('version_added')
+ version_added_collection = doc.pop('version_added_collection', None)
+ text.append("ADDED IN: %s\n" % DocCLI._format_version_added(version_added, version_added_collection))
+
+ if doc.get('deprecated', False):
+ text.append("DEPRECATED: \n")
+ if isinstance(doc['deprecated'], dict):
+ if 'removed_at_date' in doc['deprecated']:
+ text.append(
+ "\tReason: %(why)s\n\tWill be removed in a release after %(removed_at_date)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')
+ )
+ else:
+ if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
+ doc['deprecated']['removed_in'] = doc['deprecated']['version']
+ text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
+ else:
+ text.append("%s" % doc.pop('deprecated'))
+ text.append("\n")
+
+ if doc.pop('has_action', False):
+ text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
+
+ if doc.get('options', False):
+ text.append("OPTIONS (= is mandatory):\n")
+ DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ text.append('')
+
+ if doc.get('attributes', False):
+ text.append("ATTRIBUTES:\n")
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc.pop('attributes')), opt_indent))
+ text.append('')
+
+ if doc.get('notes', False):
+ text.append("NOTES:")
+ for note in doc['notes']:
+ text.append(textwrap.fill(DocCLI.tty_ify(note), limit - 6,
+ initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append('')
+ text.append('')
+ del doc['notes']
+
+ if doc.get('seealso', False):
+ text.append("SEE ALSO:")
+ for item in doc['seealso']:
+ if 'module' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify('Module %s' % item['module']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ description = item.get('description', 'The official documentation on the %s module.' % item['module'])
+ text.append(textwrap.fill(DocCLI.tty_ify(description), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('modules/%s_module.html' % item['module'])),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
+ elif 'name' in item and 'link' in item and 'description' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify(item['name']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['description']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['link']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ elif 'ref' in item and 'description' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['description']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+
+ text.append('')
+ text.append('')
+ del doc['seealso']
+
+ if doc.get('requirements', False):
+ req = ", ".join(doc.pop('requirements'))
+ text.append("REQUIREMENTS:%s\n" % textwrap.fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))
+
+ # Generic handler
+ for k in sorted(doc):
+ if k in DocCLI.IGNORE or not doc[k]:
+ continue
+ if isinstance(doc[k], string_types):
+ text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
+ elif isinstance(doc[k], (list, tuple)):
+ text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
+ else:
+            # use empty indent since this affects the start of the yaml doc, not its keys
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
+ del doc[k]
+ text.append('')
+
+ if doc.get('plainexamples', False):
+ text.append("EXAMPLES:")
+ text.append('')
+ if isinstance(doc['plainexamples'], string_types):
+ text.append(doc.pop('plainexamples').strip())
+ else:
+ try:
+ text.append(yaml_dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
+ except Exception as e:
+ raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
+ text.append('')
+ text.append('')
+
+ if doc.get('returndocs', False):
+ text.append("RETURN VALUES:")
+ DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True)
+
+ return "\n".join(text)
+
+
+def _do_yaml_snippet(doc):
+ text = []
+
+ mdesc = DocCLI.tty_ify(doc['short_description'])
+ module = doc.get('module')
+
+ if module:
+ # this is actually a usable task!
+ text.append("- name: %s" % (mdesc))
+ text.append(" %s:" % (module))
+ else:
+        # just a comment, hopefully a useful yaml file
+ text.append("# %s:" % doc.get('plugin', doc.get('name')))
+
+ pad = 29
+ subdent = '# '.rjust(pad + 2)
+ limit = display.columns - pad
+
+ for o in sorted(doc['options'].keys()):
+ opt = doc['options'][o]
+ if isinstance(opt['description'], string_types):
+ desc = DocCLI.tty_ify(opt['description'])
+ else:
+ desc = DocCLI.tty_ify(" ".join(opt['description']))
+
+ required = opt.get('required', False)
+ if not isinstance(required, bool):
+ raise ValueError("Incorrect value for 'Required', a boolean is needed: %s" % required)
+
+ o = '%s:' % o
+ if module:
+ if required:
+ desc = "(required) %s" % desc
+ text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
+ else:
+ if required:
+ default = '(required)'
+ else:
+ default = opt.get('default', 'None')
+
+ text.append("%s %-9s # %s" % (o, default, textwrap.fill(desc, limit, subsequent_indent=subdent, max_lines=3)))
+
+ return text
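+
+# Illustrative (hypothetical docs dict): for a module 'ping' with one optional
+# 'data' option, _do_yaml_snippet() produces roughly:
+#
+#   - name: Try to connect to host and return pong
+#     ping:
+#       data:                # Data to return for the pong return value.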
+
+
+def _do_lookup_snippet(doc):
+ text = []
+ snippet = "lookup('%s', " % doc.get('plugin', doc.get('name'))
+ comment = []
+
+ for o in sorted(doc['options'].keys()):
+
+ opt = doc['options'][o]
+ comment.append('# %s(%s): %s' % (o, opt.get('type', 'string'), opt.get('description', '')))
+ if o in ('_terms', '_raw', '_list'):
+ # these are 'list of arguments'
+ snippet += '< %s >' % (o)
+ continue
+
+ required = opt.get('required', False)
+ if not isinstance(required, bool):
+ raise ValueError("Incorrect value for 'Required', a boolean is needed: %s" % required)
+
+ if required:
+ default = '<REQUIRED>'
+ else:
+ default = opt.get('default', 'None')
+
+ if opt.get('type') in ('string', 'str'):
+ snippet += ", %s='%s'" % (o, default)
+ else:
+ snippet += ', %s=%s' % (o, default)
+
+ snippet += ")"
+
+ if comment:
+ text.extend(comment)
+ text.append('')
+ text.append(snippet)
+
+ return text
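+
+# Illustrative (hypothetical docs dict): for a lookup 'file' taking _terms plus
+# an 'rstrip' boolean option, _do_lookup_snippet() produces roughly:
+#
+#   # _terms(list): Path(s) of file(s) to read.
+#   # rstrip(bool): Whether to remove trailing newlines.
+#
+#   lookup('file', < _terms >, rstrip=True)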
+
+
+def main(args=None):
+ DocCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
new file mode 100755
index 0000000..3cb7fe2
--- /dev/null
+++ b/bin/ansible-galaxy
@@ -0,0 +1,1865 @@
+#!/usr/bin/env python
+# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
+# Copyright: (c) 2018-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import json
+import os.path
+import re
+import shutil
+import sys
+import textwrap
+import time
+import typing as t
+
+from dataclasses import dataclass
+from yaml.error import YAMLError
+
+import ansible.constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.collection import (
+ build_collection,
+ download_collections,
+ find_existing_collections,
+ install_collections,
+ publish_collection,
+ validate_collection_name,
+ validate_collection_path,
+ verify_collections,
+ SIGNATURE_COUNT_RE,
+)
+from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+)
+from ansible.galaxy.collection.gpg import GPG_ERROR_MAP
+from ansible.galaxy.dependency_resolution.dataclasses import Requirement
+
+from ansible.galaxy.role import GalaxyRole
+from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.common.yaml import yaml_dump, yaml_load
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils import six
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_versioned_doclink
+
+display = Display()
+urlparse = six.moves.urllib.parse.urlparse
+
+# config definition by position: name, required, type
+SERVER_DEF = [
+ ('url', True, 'str'),
+ ('username', False, 'str'),
+ ('password', False, 'str'),
+ ('token', False, 'str'),
+ ('auth_url', False, 'str'),
+ ('v3', False, 'bool'),
+ ('validate_certs', False, 'bool'),
+ ('client_id', False, 'str'),
+ ('timeout', False, 'int'),
+]
+
+# config definition fields
+SERVER_ADDITIONAL = {
+ 'v3': {'default': 'False'},
+ 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
+ 'timeout': {'default': '60', 'cli': [{'name': 'timeout'}]},
+ 'token': {'default': None},
+}
+
+
+def with_collection_artifacts_manager(wrapped_method):
+ """Inject an artifacts manager if not passed explicitly.
+
+ This decorator constructs a ConcreteArtifactsManager and maintains
+ the related temporary directory auto-cleanup around the target
+ method invocation.
+ """
+ def method_wrapper(*args, **kwargs):
+ if 'artifacts_manager' in kwargs:
+ return wrapped_method(*args, **kwargs)
+
+ # FIXME: use validate_certs context from Galaxy servers when downloading collections
+ artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['resolved_validate_certs']}
+
+ keyring = context.CLIARGS.get('keyring', None)
+ if keyring is not None:
+ artifacts_manager_kwargs.update({
+ 'keyring': GalaxyCLI._resolve_path(keyring),
+ 'required_signature_count': context.CLIARGS.get('required_valid_signature_count', None),
+ 'ignore_signature_errors': context.CLIARGS.get('ignore_gpg_errors', None),
+ })
+
+ with ConcreteArtifactsManager.under_tmpdir(
+ C.DEFAULT_LOCAL_TMP,
+ **artifacts_manager_kwargs
+ ) as concrete_artifact_cm:
+ kwargs['artifacts_manager'] = concrete_artifact_cm
+ return wrapped_method(*args, **kwargs)
+ return method_wrapper
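+
+# Illustrative usage (the method name here is hypothetical; mirrors how the
+# execute_* methods consume the decorator):
+#
+#   @with_collection_artifacts_manager
+#   def execute_something(self, artifacts_manager=None):
+#       ...   # artifacts_manager is injected and its tmpdir cleaned up on exit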
+
+
+def _display_header(path, h1, h2, w1=10, w2=7):
+ display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
+ path,
+ h1,
+ h2,
+ '-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
+ '-' * max([len(h2), w2]),
+ cwidth=w1,
+ vwidth=w2,
+ ))
+
+
+def _display_role(gr):
+ install_info = gr.install_info
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ display.display("- %s, %s" % (gr.name, version))
+
+
+def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
+ display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
+ fqcn=to_text(collection.fqcn),
+ version=collection.ver,
+ cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
+ vwidth=max(vwidth, min_vwidth)
+ ))
+
+
+def _get_collection_widths(collections):
+ if not is_iterable(collections):
+ collections = (collections, )
+
+ fqcn_set = {to_text(c.fqcn) for c in collections}
+ version_set = {to_text(c.ver) for c in collections}
+
+ fqcn_length = len(max(fqcn_set, key=len))
+ version_length = len(max(version_set, key=len))
+
+ return fqcn_length, version_length
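+
+# Illustrative: given collections with FQCNs 'ns.coll' and 'ns.longer_coll' at
+# versions '1.0.0' and '2.10.3', _get_collection_widths() returns (14, 6):
+# the longest FQCN and version lengths, used to size the display columns.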
+
+
+def validate_signature_count(value):
+ match = re.match(SIGNATURE_COUNT_RE, value)
+
+ if match is None:
+ raise ValueError(f"{value} is not a valid signature count value")
+
+ return value
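+
+# Illustrative (the exact grammar lives in SIGNATURE_COUNT_RE): integer counts
+# such as '2', the keyword 'all', and '+'-prefixed strict variants are expected
+# to validate; anything else raises ValueError:
+#
+#   validate_signature_count('2')      # -> '2'
+#   validate_signature_count('three')  # raises ValueError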
+
+
+@dataclass
+class RoleDistributionServer:
+ _api: t.Union[GalaxyAPI, None]
+ api_servers: list[GalaxyAPI]
+
+ @property
+ def api(self):
+ if self._api:
+ return self._api
+
+ for server in self.api_servers:
+ try:
+ if u'v1' in server.available_api_versions:
+ self._api = server
+ break
+ except Exception:
+ continue
+
+ if not self._api:
+ self._api = self.api_servers[0]
+
+ return self._api
+
+
+class GalaxyCLI(CLI):
+ '''Command to manage Ansible roles and collections.
+
+ None of the CLI tools are designed to run concurrently with themselves.
+ Use an external scheduler and/or locking to ensure there are no clashing operations.
+ '''
+
+ name = 'ansible-galaxy'
+
+ SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
+
+ def __init__(self, args):
+ self._raw_args = args
+ self._implicit_role = False
+
+ if len(args) > 1:
+ # Inject role into sys.argv[1] as a backwards compatibility step
+ if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
+ # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
+ args.insert(1, 'role')
+ self._implicit_role = True
+ # since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
+ if args[1:3] == ['role', 'login']:
+ display.error(
+ "The login command was removed in late 2020. An API key is now required to publish roles or collections "
+ "to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
+ "ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
+ "command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
+ sys.exit(1)
+
+ self.api_servers = []
+ self.galaxy = None
+ self.lazy_role_api = None
+ super(GalaxyCLI, self).__init__(args)
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+
+ super(GalaxyCLI, self).init_parser(
+ desc="Perform various Role and Collection related operations.",
+ )
+
+ # Common arguments that apply to more than 1 action
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
+ common.add_argument('--token', '--api-key', dest='api_key',
+ help='The Ansible Galaxy API key which can be found at '
+ 'https://galaxy.ansible.com/me/preferences.')
+ common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', help='Ignore SSL certificate validation errors.', default=None)
+ common.add_argument('--timeout', dest='timeout', type=int,
+ help="The time to wait for operations against the galaxy server, defaults to 60s.")
+
+ opt_help.add_verbosity_options(common)
+
+ force = opt_help.argparse.ArgumentParser(add_help=False)
+ force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
+ help='Force overwriting an existing role or collection')
+
+ github = opt_help.argparse.ArgumentParser(add_help=False)
+ github.add_argument('github_user', help='GitHub username')
+ github.add_argument('github_repo', help='GitHub repository')
+
+ offline = opt_help.argparse.ArgumentParser(add_help=False)
+ offline.add_argument('--offline', dest='offline', default=False, action='store_true',
+ help="Don't query the galaxy API when creating roles")
+
+ default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
+ roles_path = opt_help.argparse.ArgumentParser(add_help=False)
+ roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
+ default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
+ help='The path to the directory containing your roles. The default is the first '
+ 'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
+
+ collections_path = opt_help.argparse.ArgumentParser(add_help=False)
+ collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
+ default=AnsibleCollectionConfig.collection_paths,
+ action=opt_help.PrependListAction,
+ help="One or more directories to search for collections in addition "
+ "to the default COLLECTIONS_PATHS. Separate multiple paths "
+ "with '{0}'.".format(os.path.pathsep))
+
+ cache_options = opt_help.argparse.ArgumentParser(add_help=False)
+ cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
+ default=False, help='Clear the existing server response cache.')
+ cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
+ help='Do not use the server response cache.')
+
+ # Add sub parser for the Galaxy role type (role or collection)
+ type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
+ type_parser.required = True
+
+ # Add sub parser for the Galaxy collection actions
+ collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
+ collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
+ collection_parser.required = True
+ self.add_download_options(collection_parser, parents=[common, cache_options])
+ self.add_init_options(collection_parser, parents=[common, force])
+ self.add_build_options(collection_parser, parents=[common, force])
+ self.add_publish_options(collection_parser, parents=[common])
+ self.add_install_options(collection_parser, parents=[common, force, cache_options])
+ self.add_list_options(collection_parser, parents=[common, collections_path])
+ self.add_verify_options(collection_parser, parents=[common, collections_path])
+
+ # Add sub parser for the Galaxy role actions
+ role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
+ role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
+ role_parser.required = True
+ self.add_init_options(role_parser, parents=[common, force, offline])
+ self.add_remove_options(role_parser, parents=[common, roles_path])
+ self.add_delete_options(role_parser, parents=[common, github])
+ self.add_list_options(role_parser, parents=[common, roles_path])
+ self.add_search_options(role_parser, parents=[common])
+ self.add_import_options(role_parser, parents=[common, github])
+ self.add_setup_options(role_parser, parents=[common, roles_path])
+
+ self.add_info_options(role_parser, parents=[common, roles_path, offline])
+ self.add_install_options(role_parser, parents=[common, force, roles_path])
+
+ def add_download_options(self, parser, parents=None):
+ download_parser = parser.add_parser('download', parents=parents,
+ help='Download collections and their dependencies as a tarball for an '
+ 'offline install.')
+ download_parser.set_defaults(func=self.execute_download)
+
+ download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
+
+ download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help="Don't download collection(s) listed as dependencies.")
+
+ download_parser.add_argument('-p', '--download-path', dest='download_path',
+ default='./collections',
+ help='The directory to download the collections to.')
+ download_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be downloaded.')
+ download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
+ help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+
+ def add_init_options(self, parser, parents=None):
+ galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
+
+ init_parser = parser.add_parser('init', parents=parents,
+                                        help='Initialize a new {0} with the base structure of a '
+ '{0}.'.format(galaxy_type))
+ init_parser.set_defaults(func=self.execute_init)
+
+ init_parser.add_argument('--init-path', dest='init_path', default='./',
+ help='The path in which the skeleton {0} will be created. The default is the '
+ 'current working directory.'.format(galaxy_type))
+ init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
+ default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
+ help='The path to a {0} skeleton that the new {0} should be based '
+ 'upon.'.format(galaxy_type))
+
+ obj_name_kwargs = {}
+ if galaxy_type == 'collection':
+ obj_name_kwargs['type'] = validate_collection_name
+ init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
+ **obj_name_kwargs)
+
+ if galaxy_type == 'role':
+ init_parser.add_argument('--type', dest='role_type', action='store', default='default',
+ help="Initialize using an alternate role type. Valid types include: 'container', "
+ "'apb' and 'network'.")
+
+ def add_remove_options(self, parser, parents=None):
+ remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
+ remove_parser.set_defaults(func=self.execute_remove)
+
+ remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
+
+ def add_delete_options(self, parser, parents=None):
+ delete_parser = parser.add_parser('delete', parents=parents,
+ help='Removes the role from Galaxy. It does not remove or alter the actual '
+ 'GitHub repository.')
+ delete_parser.set_defaults(func=self.execute_delete)
+
+ def add_list_options(self, parser, parents=None):
+ galaxy_type = 'role'
+ if parser.metavar == 'COLLECTION_ACTION':
+ galaxy_type = 'collection'
+
+ list_parser = parser.add_parser('list', parents=parents,
+ help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
+
+ list_parser.set_defaults(func=self.execute_list)
+
+ list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
+
+ if galaxy_type == 'collection':
+ list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
+ help="Format to display the list of collections in.")
+
+ def add_search_options(self, parser, parents=None):
+ search_parser = parser.add_parser('search', parents=parents,
+ help='Search the Galaxy database by tags, platforms, author and multiple '
+ 'keywords.')
+ search_parser.set_defaults(func=self.execute_search)
+
+ search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
+ search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
+ search_parser.add_argument('--author', dest='author', help='GitHub username')
+ search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
+
+ def add_import_options(self, parser, parents=None):
+ import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
+ import_parser.set_defaults(func=self.execute_import)
+
+ import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
+ help="Don't wait for import results.")
+ import_parser.add_argument('--branch', dest='reference',
+ help='The name of a branch to import. Defaults to the repository\'s default branch '
+ '(usually master)')
+ import_parser.add_argument('--role-name', dest='role_name',
+ help='The name the role should have, if different than the repo name')
+ import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
+ help='Check the status of the most recent import request for given github_'
+ 'user/github_repo.')
+
+ def add_setup_options(self, parser, parents=None):
+ setup_parser = parser.add_parser('setup', parents=parents,
+ help='Manage the integration between Galaxy and the given source.')
+ setup_parser.set_defaults(func=self.execute_setup)
+
+ setup_parser.add_argument('--remove', dest='remove_id', default=None,
+ help='Remove the integration matching the provided ID value. Use --list to see '
+ 'ID values.')
+ setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
+ help='List all of your integrations.')
+ setup_parser.add_argument('source', help='Source')
+ setup_parser.add_argument('github_user', help='GitHub username')
+ setup_parser.add_argument('github_repo', help='GitHub repository')
+ setup_parser.add_argument('secret', help='Secret')
+
+ def add_info_options(self, parser, parents=None):
+ info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
+ info_parser.set_defaults(func=self.execute_info)
+
+ info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
+
+ def add_verify_options(self, parser, parents=None):
+ galaxy_type = 'collection'
+ verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
+ 'found on the server and the installed copy. This does not verify dependencies.')
+ verify_parser.set_defaults(func=self.execute_verify)
+
+ verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
+ 'This is mutually exclusive with --requirements-file.')
+ verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help='Ignore errors during verification and continue with the next specified collection.')
+ verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
+ help='Validate collection integrity locally without contacting server for '
+ 'canonical manifest hash.')
+ verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be verified.')
+ verify_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
+ help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
+ verify_parser.add_argument('--signature', dest='signatures', action='append',
+ help='An additional signature source to verify the authenticity of the MANIFEST.json before using '
+ 'it to verify the rest of the contents of a collection from a Galaxy server. Use in '
+ 'conjunction with a positional collection name (mutually exclusive with --requirements-file).')
+ valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
+ 'or all to signify that all signatures must be used to verify the collection. ' \
+ 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
+ ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
+ 'Provide this option multiple times to ignore a list of status codes. ' \
+ 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
+ verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
+ help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
+ verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
+ help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
+ choices=list(GPG_ERROR_MAP.keys()))
+
+ def add_install_options(self, parser, parents=None):
+ galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
+
+ args_kwargs = {}
+ if galaxy_type == 'collection':
+ args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
+ 'mutually exclusive with --requirements-file.'
+ ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
+ 'collection. This will not ignore dependency conflict errors.'
+ else:
+ args_kwargs['help'] = 'Role name, URL or tar file'
+ ignore_errors_help = 'Ignore errors and continue with the next specified role.'
+
+ install_parser = parser.add_parser('install', parents=parents,
+ help='Install {0}(s) from file(s), URL(s) or Ansible '
+ 'Galaxy'.format(galaxy_type))
+ install_parser.set_defaults(func=self.execute_install)
+
+ install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
+ install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help=ignore_errors_help)
+
+ install_exclusive = install_parser.add_mutually_exclusive_group()
+ install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help="Don't download {0}s listed as dependencies.".format(galaxy_type))
+ install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
+ help="Force overwriting an existing {0} and its "
+ "dependencies.".format(galaxy_type))
+
+        valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
+                                     'or all to signify that all signatures must be used to verify the collection. ' \
+                                     'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
+ ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
+ 'Provide this option multiple times to ignore a list of status codes. ' \
+ 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
+
+ if galaxy_type == 'collection':
+ install_parser.add_argument('-p', '--collections-path', dest='collections_path',
+ default=self._get_default_collection_path(),
+ help='The path to the directory containing your collections.')
+ install_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be installed.')
+ install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
+ help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+ install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
+ help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
+ install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
+ help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
+ install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
+ default=C.GALAXY_DISABLE_GPG_VERIFY,
+ help='Disable GPG signature verification when installing collections from a Galaxy server')
+ install_parser.add_argument('--signature', dest='signatures', action='append',
+ help='An additional signature source to verify the authenticity of the MANIFEST.json before '
+ 'installing the collection from a Galaxy server. Use in conjunction with a positional '
+ 'collection name (mutually exclusive with --requirements-file).')
+ install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
+ help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
+ install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
+ help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
+ choices=list(GPG_ERROR_MAP.keys()))
+ install_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
+ help='Install collection artifacts (tarballs) without contacting any distribution servers. '
+ 'This does not apply to collections in remote Git repositories or URLs to remote tarballs.'
+ )
+ else:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of roles to be installed.')
+
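+            # Sniff the raw CLI args for -r/--role-file before argparse runs: when the implicit
+            # 'role' subcommand is used with a requirements file, that file may also pull in
+            # collections, so the collection signature options below must be registered too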
+ r_re = re.compile(r'^(?<!-)-[a-zA-Z]*r[a-zA-Z]*') # -r, -fr
+ contains_r = bool([a for a in self._raw_args if r_re.match(a)])
+ role_file_re = re.compile(r'--role-file($|=)') # --role-file foo, --role-file=foo
+ contains_role_file = bool([a for a in self._raw_args if role_file_re.match(a)])
+ if self._implicit_role and (contains_r or contains_role_file):
+ # Any collections in the requirements files will also be installed
+ install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
+ help='The keyring used during collection signature verification')
+ install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
+ default=C.GALAXY_DISABLE_GPG_VERIFY,
+ help='Disable GPG signature verification when installing collections from a Galaxy server')
+ install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
+ help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
+ install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
+ help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
+ choices=list(GPG_ERROR_MAP.keys()))
+
+ install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
+ default=False,
+ help='Use tar instead of the scm archive option when packaging the role.')
+
+ def add_build_options(self, parser, parents=None):
+ build_parser = parser.add_parser('build', parents=parents,
+ help='Build an Ansible collection artifact that can be published to Ansible '
+ 'Galaxy.')
+ build_parser.set_defaults(func=self.execute_build)
+
+ build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
+ help='Path to the collection(s) directory to build. This should be the directory '
+ 'that contains the galaxy.yml file. The default is the current working '
+ 'directory.')
+ build_parser.add_argument('--output-path', dest='output_path', default='./',
+                                  help='The path in which the collection artifact is built. The default is the current '
+                                  'working directory.')
+
+ def add_publish_options(self, parser, parents=None):
+ publish_parser = parser.add_parser('publish', parents=parents,
+ help='Publish a collection artifact to Ansible Galaxy.')
+ publish_parser.set_defaults(func=self.execute_publish)
+
+ publish_parser.add_argument('args', metavar='collection_path',
+ help='The path to the collection tarball to publish.')
+ publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
+ help="Don't wait for import validation results.")
+ publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
+ help="The time to wait for the collection import process to finish.")
+
+ def post_process_args(self, options):
+ options = super(GalaxyCLI, self).post_process_args(options)
+
+        # ensure we have a 'usable' cli option; ignore_certs=None means the flag was not given on the CLI
+        setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
+        # resolve the value to use when validate_certs is None, falling back to the GALAXY_IGNORE_CERTS config
+        setattr(options, 'resolved_validate_certs', (options.validate_certs if options.validate_certs is not None else not C.GALAXY_IGNORE_CERTS))
+
+ display.verbosity = options.verbosity
+ return options
+
+ def run(self):
+
+ super(GalaxyCLI, self).run()
+
+ self.galaxy = Galaxy()
+
+ def server_config_def(section, key, required, option_type):
+ config_def = {
+ 'description': 'The %s of the %s Galaxy server' % (key, section),
+ 'ini': [
+ {
+ 'section': 'galaxy_server.%s' % section,
+ 'key': key,
+ }
+ ],
+ 'env': [
+ {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
+ ],
+ 'required': required,
+ 'type': option_type,
+ }
+ if key in SERVER_ADDITIONAL:
+ config_def.update(SERVER_ADDITIONAL[key])
+
+ return config_def
+
+ galaxy_options = {}
+ for optional_key in ['clear_response_cache', 'no_cache', 'timeout']:
+ if optional_key in context.CLIARGS:
+ galaxy_options[optional_key] = context.CLIARGS[optional_key]
+
+ config_servers = []
+
+        # Need to filter out empty strings and other non-truthy values, as an empty server list env var is equal to [''].
+ server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
+ for server_priority, server_key in enumerate(server_list, start=1):
+ # Abuse the 'plugin config' by making 'galaxy_server' a type of plugin
+ # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
+ # section [galaxy_server.<server>] for the values url, username, password, and token.
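+            # For example, a server named 'release_galaxy' (hypothetical name) could be defined as:
+            #   [galaxy]
+            #   server_list = release_galaxy
+            #   [galaxy_server.release_galaxy]
+            #   url = https://galaxy.ansible.com/
+            #   token = <your token>
+            # with each key also overridable via env vars such as ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_URL.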
+ config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF)
+ defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
+ C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
+
+ # resolve the config created options above with existing config and user options
+ server_options = C.config.get_plugin_options('galaxy_server', server_key)
+
+            # auth_url is used to create the token, but not directly by GalaxyAPI, so
+            # it doesn't need to be passed as a kwarg to GalaxyAPI; the same goes for the others we pop here
+ auth_url = server_options.pop('auth_url')
+ client_id = server_options.pop('client_id')
+ token_val = server_options['token'] or NoTokenSentinel
+ username = server_options['username']
+ v3 = server_options.pop('v3')
+ if server_options['validate_certs'] is None:
+ server_options['validate_certs'] = context.CLIARGS['resolved_validate_certs']
+ validate_certs = server_options['validate_certs']
+
+ if v3:
+ # This allows a user to explicitly indicate the server uses the /v3 API
+ # This was added for testing against pulp_ansible and I'm not sure it has
+ # a practical purpose outside of this use case. As such, this option is not
+ # documented as of now
+ server_options['available_api_versions'] = {'v3': '/v3'}
+
+ # default case if no auth info is provided.
+ server_options['token'] = None
+
+ if username:
+ server_options['token'] = BasicAuthToken(username, server_options['password'])
+ else:
+ if token_val:
+ if auth_url:
+ server_options['token'] = KeycloakToken(access_token=token_val,
+ auth_url=auth_url,
+ validate_certs=validate_certs,
+ client_id=client_id)
+ else:
+ # The galaxy v1 / github / django / 'Token'
+ server_options['token'] = GalaxyToken(token=token_val)
+
+ server_options.update(galaxy_options)
+ config_servers.append(GalaxyAPI(
+ self.galaxy, server_key,
+ priority=server_priority,
+ **server_options
+ ))
+
+ cmd_server = context.CLIARGS['api_server']
+ cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
+
+ validate_certs = context.CLIARGS['resolved_validate_certs']
+ if cmd_server:
+            # Cmd args take precedence over the config entry, but first check if the arg was a name and use that config
+ # entry, otherwise create a new API entry for the server specified.
+ config_server = next((s for s in config_servers if s.name == cmd_server), None)
+ if config_server:
+ self.api_servers.append(config_server)
+ else:
+ self.api_servers.append(GalaxyAPI(
+ self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
+ priority=len(config_servers) + 1,
+ validate_certs=validate_certs,
+ **galaxy_options
+ ))
+ else:
+ self.api_servers = config_servers
+
+ # Default to C.GALAXY_SERVER if no servers were defined
+ if len(self.api_servers) == 0:
+ self.api_servers.append(GalaxyAPI(
+ self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
+ priority=0,
+ validate_certs=validate_certs,
+ **galaxy_options
+ ))
+
+        # api versions are only checked lazily, once a GalaxyRole makes an api call;
+        # self.api can be used to evaluate the best server immediately
+ self.lazy_role_api = RoleDistributionServer(None, self.api_servers)
+
+ return context.CLIARGS['func']()
+
+ @property
+ def api(self):
+ return self.lazy_role_api.api
+
+ def _get_default_collection_path(self):
+ return C.COLLECTIONS_PATHS[0]
+
+ def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None, validate_signature_options=True):
+ """
+        Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are
+        two requirements file formats:
+
+ # v1 (roles only)
+        - src: The source of the role, required if include is not set. Can be a Galaxy role name, or a URL to an SCM repo or tarball.
+          name: Downloads the role to the specified name; defaults to the name from Galaxy, or the repo name if src is a URL.
+          scm: If src is a URL, specify the SCM. Only git and hg are supported; defaults to git.
+          version: The version of the role to download. Can also be a tag, commit, or branch name; defaults to master.
+          include: Path to additional requirements.yml files.
+
+ # v2 (roles and collections)
+ ---
+ roles:
+ # Same as v1 format just under the roles key
+
+ collections:
+ - namespace.collection
+ - name: namespace.collection
+ version: version identifier, multiple identifiers are separated by ','
+ source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
+ type: git|file|url|galaxy
+
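+        An illustrative v2 file (hypothetical names):
+
+        ---
+        roles:
+          - name: my_user.my_role
+            version: 1.2.3
+        collections:
+          - name: my_namespace.my_collection
+            version: '>=1.0.0,<2.0.0'
+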
+ :param requirements_file: The path to the requirements file.
+ :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
+ :param artifacts_manager: Artifacts manager.
+        :return: a dict containing the roles and collections found in the requirements file.
+ """
+ requirements = {
+ 'roles': [],
+ 'collections': [],
+ }
+
+ b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
+ if not os.path.exists(b_requirements_file):
+ raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
+
+ display.vvv("Reading requirement file at '%s'" % requirements_file)
+ with open(b_requirements_file, 'rb') as req_obj:
+ try:
+ file_requirements = yaml_load(req_obj)
+ except YAMLError as err:
+ raise AnsibleError(
+ "Failed to parse the requirements yml at '%s' with the following error:\n%s"
+ % (to_native(requirements_file), to_native(err)))
+
+ if file_requirements is None:
+ raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
+
+ def parse_role_req(requirement):
+ if "include" not in requirement:
+ role = RoleRequirement.role_yaml_parse(requirement)
+ display.vvv("found role %s in yaml file" % to_text(role))
+ if "name" not in role and "src" not in role:
+ raise AnsibleError("Must specify name or src for role")
+ return [GalaxyRole(self.galaxy, self.lazy_role_api, **role)]
+ else:
+ b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
+ if not os.path.isfile(b_include_path):
+ raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
+ % (to_native(b_include_path), to_native(requirements_file)))
+
+ with open(b_include_path, 'rb') as f_include:
+ try:
+ return [GalaxyRole(self.galaxy, self.lazy_role_api, **r) for r in
+ (RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
+ except Exception as e:
+ raise AnsibleError("Unable to load data from include requirements file: %s %s"
+ % (to_native(requirements_file), to_native(e)))
+
+ if isinstance(file_requirements, list):
+ # Older format that contains only roles
+ if not allow_old_format:
+ raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
+ "a list of collections to install")
+
+ for role_req in file_requirements:
+ requirements['roles'] += parse_role_req(role_req)
+
+ else:
+ # Newer format with a collections and/or roles key
+ extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
+ if extra_keys:
+ raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
+ "file. Found: %s" % (to_native(", ".join(extra_keys))))
+
+ for role_req in file_requirements.get('roles') or []:
+ requirements['roles'] += parse_role_req(role_req)
+
+ requirements['collections'] = [
+ Requirement.from_requirement_dict(
+ self._init_coll_req_dict(collection_req),
+ artifacts_manager,
+ validate_signature_options,
+ )
+ for collection_req in file_requirements.get('collections') or []
+ ]
+
+ return requirements
+
+ def _init_coll_req_dict(self, coll_req):
+ if not isinstance(coll_req, dict):
+ # Assume it's a string:
+ return {'name': coll_req}
+
+ if (
+ 'name' not in coll_req or
+ not coll_req.get('source') or
+ coll_req.get('type', 'galaxy') != 'galaxy'
+ ):
+ return coll_req
+
+ # Try and match up the requirement source with our list of Galaxy API
+ # servers defined in the config, otherwise create a server with that
+ # URL without any auth.
+ coll_req['source'] = next(
+ iter(
+ srvr for srvr in self.api_servers
+ if coll_req['source'] in {srvr.name, srvr.api_server}
+ ),
+ GalaxyAPI(
+ self.galaxy,
+ 'explicit_requirement_{name!s}'.format(
+ name=coll_req['name'],
+ ),
+ coll_req['source'],
+ validate_certs=context.CLIARGS['resolved_validate_certs'],
+ ),
+ )
+
+ return coll_req
+
+ @staticmethod
+ def exit_without_ignore(rc=1):
+ """
+ Exits with the specified return code unless the
+ option --ignore-errors was specified
+ """
+ if not context.CLIARGS['ignore_errors']:
+ raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
+
+ @staticmethod
+ def _display_role_info(role_info):
+
+ text = [u"", u"Role: %s" % to_text(role_info['name'])]
+
+        # Get the top-level 'description' first, falling back to role_info['galaxy_info']['description'].
+ galaxy_info = role_info.get('galaxy_info', {})
+ description = role_info.get('description', galaxy_info.get('description', ''))
+ text.append(u"\tdescription: %s" % description)
+
+ for k in sorted(role_info.keys()):
+
+ if k in GalaxyCLI.SKIP_INFO_KEYS:
+ continue
+
+ if isinstance(role_info[k], dict):
+ text.append(u"\t%s:" % (k))
+ for key in sorted(role_info[k].keys()):
+ if key in GalaxyCLI.SKIP_INFO_KEYS:
+ continue
+ text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
+ else:
+ text.append(u"\t%s: %s" % (k, role_info[k]))
+
+ # make sure we have a trailing newline returned
+ text.append(u"")
+ return u'\n'.join(text)
+
+ @staticmethod
+ def _resolve_path(path):
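+        # expands env vars and '~' and normalizes to an absolute path,
+        # e.g. '~/roles' -> '/home/user/roles' (hypothetical home directory)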
+ return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
+
+ @staticmethod
+ def _get_skeleton_galaxy_yml(template_path, inject_data):
+ with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
+ meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
+
+ galaxy_meta = get_collections_galaxy_meta_info()
+
+ required_config = []
+ optional_config = []
+ for meta_entry in galaxy_meta:
+ config_list = required_config if meta_entry.get('required', False) else optional_config
+
+ value = inject_data.get(meta_entry['key'], None)
+ if not value:
+ meta_type = meta_entry.get('type', 'str')
+
+ if meta_type == 'str':
+ value = ''
+ elif meta_type == 'list':
+ value = []
+ elif meta_type == 'dict':
+ value = {}
+
+ meta_entry['value'] = value
+ config_list.append(meta_entry)
+
+ link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
+ const_pattern = re.compile(r"C\(([^)]+)\)")
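+        # e.g. (hypothetical inputs) "L(Galaxy docs, https://docs.ansible.com)" becomes
+        # "Galaxy docs <https://docs.ansible.com>" and "C(some_value)" becomes "'some_value'"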
+
+ def comment_ify(v):
+ if isinstance(v, list):
+ v = ". ".join([l.rstrip('.') for l in v])
+
+ v = link_pattern.sub(r"\1 <\2>", v)
+ v = const_pattern.sub(r"'\1'", v)
+
+ return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
+
+ loader = DataLoader()
+ templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
+ templar.environment.filters['comment_ify'] = comment_ify
+
+ meta_value = templar.template(meta_template)
+
+ return meta_value
+
+ def _require_one_of_collections_requirements(
+ self, collections, requirements_file,
+ signatures=None,
+ artifacts_manager=None,
+ ):
+ if collections and requirements_file:
+ raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
+ elif not collections and not requirements_file:
+ raise AnsibleError("You must specify a collection name or a requirements file.")
+ elif requirements_file:
+ if signatures is not None:
+ raise AnsibleError(
+ "The --signatures option and --requirements-file are mutually exclusive. "
+ "Use the --signatures with positional collection_name args or provide a "
+ "'signatures' key for requirements in the --requirements-file."
+ )
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+ requirements = self._parse_requirements_file(
+ requirements_file,
+ allow_old_format=False,
+ artifacts_manager=artifacts_manager,
+ )
+ else:
+ requirements = {
+ 'collections': [
+ Requirement.from_string(coll_input, artifacts_manager, signatures)
+ for coll_input in collections
+ ],
+ 'roles': [],
+ }
+ return requirements
+
+ ############################
+ # execute actions
+ ############################
+
+ def execute_role(self):
+ """
+ Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
+ as listed below.
+ """
+ # To satisfy doc build
+ pass
+
+ def execute_collection(self):
+ """
+ Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
+ listed below.
+ """
+ # To satisfy doc build
+ pass
+
+ def execute_build(self):
+ """
+ Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
+ By default, this command builds from the current working directory. You can optionally pass in the
+ collection input path (where the ``galaxy.yml`` file is).
+ """
+ force = context.CLIARGS['force']
+ output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+
+ if not os.path.exists(b_output_path):
+ os.makedirs(b_output_path)
+ elif os.path.isfile(b_output_path):
+ raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
+
+ for collection_path in context.CLIARGS['args']:
+ collection_path = GalaxyCLI._resolve_path(collection_path)
+ build_collection(
+ to_text(collection_path, errors='surrogate_or_strict'),
+ to_text(output_path, errors='surrogate_or_strict'),
+ force,
+ )
+
+ @with_collection_artifacts_manager
+ def execute_download(self, artifacts_manager=None):
+ collections = context.CLIARGS['args']
+ no_deps = context.CLIARGS['no_deps']
+ download_path = context.CLIARGS['download_path']
+
+ requirements_file = context.CLIARGS['requirements']
+ if requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+
+ requirements = self._require_one_of_collections_requirements(
+ collections, requirements_file,
+ artifacts_manager=artifacts_manager,
+ )['collections']
+
+ download_path = GalaxyCLI._resolve_path(download_path)
+ b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_download_path):
+ os.makedirs(b_download_path)
+
+ download_collections(
+ requirements, download_path, self.api_servers, no_deps,
+ context.CLIARGS['allow_pre_release'],
+ artifacts_manager=artifacts_manager,
+ )
+
+ return 0
+
+ def execute_init(self):
+ """
+ Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
+ Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
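+        For example: ``ansible-galaxy collection init my_namespace.my_collection`` (hypothetical name).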
+ """
+
+ galaxy_type = context.CLIARGS['type']
+ init_path = context.CLIARGS['init_path']
+ force = context.CLIARGS['force']
+ obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
+
+ obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
+
+ inject_data = dict(
+ description='your {0} description'.format(galaxy_type),
+ ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
+ )
+ if galaxy_type == 'role':
+ inject_data.update(dict(
+ author='your name',
+ company='your company (optional)',
+ license='license (GPL-2.0-or-later, MIT, etc)',
+ role_name=obj_name,
+ role_type=context.CLIARGS['role_type'],
+ issue_tracker_url='http://example.com/issue/tracker',
+ repository_url='http://example.com/repository',
+ documentation_url='http://docs.example.com',
+ homepage_url='http://example.com',
+ min_ansible_version=ansible_version[:3], # x.y
+ dependencies=[],
+ ))
+
+ skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
+ obj_path = os.path.join(init_path, obj_name)
+ elif galaxy_type == 'collection':
+ namespace, collection_name = obj_name.split('.', 1)
+
+ inject_data.update(dict(
+ namespace=namespace,
+ collection_name=collection_name,
+ version='1.0.0',
+ readme='README.md',
+ authors=['your name <example@domain.com>'],
+ license=['GPL-2.0-or-later'],
+ repository='http://example.com/repository',
+ documentation='http://docs.example.com',
+ homepage='http://example.com',
+ issues='http://example.com/issue/tracker',
+ build_ignore=[],
+ ))
+
+ skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
+ obj_path = os.path.join(init_path, namespace, collection_name)
+
+ b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
+
+ if os.path.exists(b_obj_path):
+ if os.path.isfile(obj_path):
+ raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
+ elif not force:
+ raise AnsibleError("- the directory %s already exists. "
+ "You can use --force to re-initialize this directory,\n"
+ "however it will reset any main.yml files that may have\n"
+ "been modified there already." % to_native(obj_path))
+
+ # delete the contents rather than the collection root in case init was run from the root (--init-path ../../)
+ for root, dirs, files in os.walk(b_obj_path, topdown=True):
+ for old_dir in dirs:
+ path = os.path.join(root, old_dir)
+ shutil.rmtree(path)
+ for old_file in files:
+ path = os.path.join(root, old_file)
+ os.unlink(path)
+
+ if obj_skeleton is not None:
+ own_skeleton = False
+ else:
+ own_skeleton = True
+ obj_skeleton = self.galaxy.default_role_skeleton_path
+ skeleton_ignore_expressions = ['^.*/.git_keep$']
+
+ obj_skeleton = os.path.expanduser(obj_skeleton)
+ skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
+
+ if not os.path.exists(obj_skeleton):
+ raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
+ to_native(obj_skeleton), galaxy_type)
+ )
+
+ loader = DataLoader()
+ templar = Templar(loader, variables=inject_data)
+
+ # create role directory
+ if not os.path.exists(b_obj_path):
+ os.makedirs(b_obj_path)
+
+ for root, dirs, files in os.walk(obj_skeleton, topdown=True):
+ rel_root = os.path.relpath(root, obj_skeleton)
+ rel_dirs = rel_root.split(os.sep)
+ rel_root_dir = rel_dirs[0]
+ if galaxy_type == 'collection':
+ # A collection can contain templates in playbooks/*/templates and roles/*/templates
+ in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
+ else:
+ in_templates_dir = rel_root_dir == 'templates'
+
+ # Filter out ignored directory names
+ # Use [:] to mutate the list os.walk uses
+ dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
+
+ for f in files:
+ filename, ext = os.path.splitext(f)
+
+ if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
+ continue
+
+ if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
+ # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
+ # dynamically which requires special options to be set.
+
+ # The templated data's keys must match the key name but the inject data contains collection_name
+ # instead of name. We just make a copy and change the key back to name for this file.
+ template_data = inject_data.copy()
+ template_data['name'] = template_data.pop('collection_name')
+
+ meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
+ b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
+ with open(b_dest_file, 'wb') as galaxy_obj:
+ galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
+ elif ext == ".j2" and not in_templates_dir:
+ src_template = os.path.join(root, f)
+ dest_file = os.path.join(obj_path, rel_root, filename)
+ template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
+ b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
+ with open(dest_file, 'wb') as df:
+ df.write(b_rendered)
+ else:
+ f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
+ shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
+
+ for d in dirs:
+ b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
+ if not os.path.exists(b_dir_path):
+ os.makedirs(b_dir_path)
+
+ display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
+
+ def execute_info(self):
+ """
+ prints out detailed information about an installed role as well as info available from the galaxy API.
+ """
+
+ roles_path = context.CLIARGS['roles_path']
+
+ data = ''
+ for role in context.CLIARGS['args']:
+
+ role_info = {'path': roles_path}
+ gr = GalaxyRole(self.galaxy, self.lazy_role_api, role)
+
+ install_info = gr.install_info
+ if install_info:
+ if 'version' in install_info:
+ install_info['installed_version'] = install_info['version']
+ del install_info['version']
+ role_info.update(install_info)
+
+ if not context.CLIARGS['offline']:
+ remote_data = None
+ try:
+ remote_data = self.api.lookup_role_by_name(role, False)
+ except AnsibleError as e:
+ if e.http_code == 400 and 'Bad Request' in e.message:
+ # Role does not exist in Ansible Galaxy
+ data = u"- the role %s was not found" % role
+ break
+
+ raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
+
+ if remote_data:
+ role_info.update(remote_data)
+
+ elif context.CLIARGS['offline'] and not gr._exists:
+ data = u"- the role %s was not found" % role
+ break
+
+ if gr.metadata:
+ role_info.update(gr.metadata)
+
+ req = RoleRequirement()
+ role_spec = req.role_yaml_parse({'role': role})
+ if role_spec:
+ role_info.update(role_spec)
+
+ data += self._display_role_info(role_info)
+
+ self.pager(data)
+
+ @with_collection_artifacts_manager
+ def execute_verify(self, artifacts_manager=None):
+
+ collections = context.CLIARGS['args']
+ search_paths = context.CLIARGS['collections_path']
+ ignore_errors = context.CLIARGS['ignore_errors']
+ local_verify_only = context.CLIARGS['offline']
+ requirements_file = context.CLIARGS['requirements']
+ signatures = context.CLIARGS['signatures']
+ if signatures is not None:
+ signatures = list(signatures)
+
+ requirements = self._require_one_of_collections_requirements(
+ collections, requirements_file,
+ signatures=signatures,
+ artifacts_manager=artifacts_manager,
+ )['collections']
+
+ resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
+
+ results = verify_collections(
+ requirements, resolved_paths,
+ self.api_servers, ignore_errors,
+ local_verify_only=local_verify_only,
+ artifacts_manager=artifacts_manager,
+ )
+
+ if any(result for result in results if not result.success):
+ return 1
+
+ return 0
+
+ @with_collection_artifacts_manager
+ def execute_install(self, artifacts_manager=None):
+ """
+        Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
+ You can pass in a list (roles or collections) or use the file
+ option listed below (these are mutually exclusive). If you pass in a list, it
+ can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
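+        For example (hypothetical names): ``ansible-galaxy role install my_user.my_role`` or
+        ``ansible-galaxy collection install my_namespace.my_collection``.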
+
+ :param artifacts_manager: Artifacts manager.
+ """
+ install_items = context.CLIARGS['args']
+ requirements_file = context.CLIARGS['requirements']
+ collection_path = None
+ signatures = context.CLIARGS.get('signatures')
+ if signatures is not None:
+ signatures = list(signatures)
+
+ if requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+
+ two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
+ "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
+ "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
+
+ # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
+ collection_requirements = []
+ role_requirements = []
+ if context.CLIARGS['type'] == 'collection':
+ collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
+ requirements = self._require_one_of_collections_requirements(
+ install_items, requirements_file,
+ signatures=signatures,
+ artifacts_manager=artifacts_manager,
+ )
+
+ collection_requirements = requirements['collections']
+ if requirements['roles']:
+ display.vvv(two_type_warning.format('role'))
+ else:
+ if not install_items and requirements_file is None:
+ raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
+
+ if requirements_file:
+ if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
+ raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
+
+ galaxy_args = self._raw_args
+ will_install_collections = self._implicit_role and '-p' not in galaxy_args and '--roles-path' not in galaxy_args
+
+ requirements = self._parse_requirements_file(
+ requirements_file,
+ artifacts_manager=artifacts_manager,
+ validate_signature_options=will_install_collections,
+ )
+ role_requirements = requirements['roles']
+
+ # We can only install collections and roles at the same time if the type wasn't specified and the -p
+ # argument was not used. If collections are present in the requirements then at least display a msg.
+ if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
+ '--roles-path' in galaxy_args):
+
+ # We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
+ # was explicit about the type and shouldn't care that collections were skipped.
+ display_func = display.warning if self._implicit_role else display.vvv
+ display_func(two_type_warning.format('collection'))
+ else:
+ collection_path = self._get_default_collection_path()
+ collection_requirements = requirements['collections']
+ else:
+            # roles were specified directly, so we'll just go out and grab them
+            # (and their dependencies, unless the user doesn't want us to).
+ for rname in context.CLIARGS['args']:
+ role = RoleRequirement.role_yaml_parse(rname.strip())
+ role_requirements.append(GalaxyRole(self.galaxy, self.lazy_role_api, **role))
+
+ if not role_requirements and not collection_requirements:
+ display.display("Skipping install, no requirements found")
+ return
+
+ if role_requirements:
+ display.display("Starting galaxy role install process")
+ self._execute_install_role(role_requirements)
+
+ if collection_requirements:
+ display.display("Starting galaxy collection install process")
+ # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
+ # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
+ self._execute_install_collection(
+ collection_requirements, collection_path,
+ artifacts_manager=artifacts_manager,
+ )
+
+ def _execute_install_collection(
+ self, requirements, path, artifacts_manager,
+ ):
+ force = context.CLIARGS['force']
+ ignore_errors = context.CLIARGS['ignore_errors']
+ no_deps = context.CLIARGS['no_deps']
+ force_with_deps = context.CLIARGS['force_with_deps']
+ try:
+ disable_gpg_verify = context.CLIARGS['disable_gpg_verify']
+ except KeyError:
+ if self._implicit_role:
+ raise AnsibleError(
+ 'Unable to properly parse command line arguments. Please use "ansible-galaxy collection install" '
+ 'instead of "ansible-galaxy install".'
+ )
+ raise
+
+ # If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
+ allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
+ upgrade = context.CLIARGS.get('upgrade', False)
+
+ collections_path = C.COLLECTIONS_PATHS
+ if len([p for p in collections_path if p.startswith(path)]) == 0:
+ display.warning("The specified collections path '%s' is not part of the configured Ansible "
+ "collections paths '%s'. The installed collection will not be picked up in an Ansible "
+ "run, unless within a playbook-adjacent collections directory." % (to_text(path), to_text(":".join(collections_path))))
+
+ output_path = validate_collection_path(path)
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_output_path):
+ os.makedirs(b_output_path)
+
+ install_collections(
+ requirements, output_path, self.api_servers, ignore_errors,
+ no_deps, force, force_with_deps, upgrade,
+ allow_pre_release=allow_pre_release,
+ artifacts_manager=artifacts_manager,
+ disable_gpg_verify=disable_gpg_verify,
+ offline=context.CLIARGS.get('offline', False),
+ )
+
+ return 0
+
+ def _execute_install_role(self, requirements):
+ role_file = context.CLIARGS['requirements']
+ no_deps = context.CLIARGS['no_deps']
+ force_deps = context.CLIARGS['force_with_deps']
+ force = context.CLIARGS['force'] or force_deps
+
+ for role in requirements:
+            # when a role file is used, only process the roles whose names match the positional args (if any were given)
+ if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
+ display.vvv('Skipping role %s' % role.name)
+ continue
+
+ display.vvv('Processing role %s ' % role.name)
+
+ # query the galaxy API for the role data
+
+ if role.install_info is not None:
+ if role.install_info['version'] != role.version or force:
+ if force:
+ display.display('- changing role %s from %s to %s' %
+ (role.name, role.install_info['version'], role.version or "unspecified"))
+ role.remove()
+ else:
+ display.warning('- %s (%s) is already installed - use --force to change version to %s' %
+ (role.name, role.install_info['version'], role.version or "unspecified"))
+ continue
+ else:
+ if not force:
+ display.display('- %s is already installed, skipping.' % str(role))
+ continue
+
+ try:
+ installed = role.install()
+ except AnsibleError as e:
+ display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
+ self.exit_without_ignore()
+ continue
+
+ # install dependencies, if we want them
+ if not no_deps and installed:
+ if not role.metadata:
+ # NOTE: the meta file is also required for installing the role, not just dependencies
+ display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
+ else:
+ role_dependencies = role.metadata_dependencies + role.requirements
+ for dep in role_dependencies:
+ display.debug('Installing dep %s' % dep)
+ dep_req = RoleRequirement()
+ dep_info = dep_req.role_yaml_parse(dep)
+ dep_role = GalaxyRole(self.galaxy, self.lazy_role_api, **dep_info)
+ if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
+ # we know we can skip this, as it's not going to
+ # be found on galaxy.ansible.com
+ continue
+ if dep_role.install_info is None:
+ if dep_role not in requirements:
+ display.display('- adding dependency: %s' % to_text(dep_role))
+ requirements.append(dep_role)
+ else:
+ display.display('- dependency %s already pending installation.' % dep_role.name)
+ else:
+ if dep_role.install_info['version'] != dep_role.version:
+ if force_deps:
+ display.display('- changing dependent role %s from %s to %s' %
+ (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
+ dep_role.remove()
+ requirements.append(dep_role)
+ else:
+ display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
+ (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
+ else:
+ if force_deps:
+ requirements.append(dep_role)
+ else:
+ display.display('- dependency %s is already installed, skipping.' % dep_role.name)
+
+ if not installed:
+ display.warning("- %s was NOT installed successfully." % role.name)
+ self.exit_without_ignore()
+
+ return 0
+
+ def execute_remove(self):
+ """
+ removes the list of roles passed as arguments from the local system.
+ """
+
+ if not context.CLIARGS['args']:
+ raise AnsibleOptionsError('- you must specify at least one role to remove.')
+
+ for role_name in context.CLIARGS['args']:
+ role = GalaxyRole(self.galaxy, self.api, role_name)
+ try:
+ if role.remove():
+ display.display('- successfully removed %s' % role_name)
+ else:
+ display.display('- %s is not installed, skipping.' % role_name)
+ except Exception as e:
+ raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
+
+ return 0
+
+ def execute_list(self):
+ """
+ List installed collections or roles
+ """
+
+ if context.CLIARGS['type'] == 'role':
+ self.execute_list_role()
+ elif context.CLIARGS['type'] == 'collection':
+ self.execute_list_collection()
+
+ def execute_list_role(self):
+ """
+ List all roles installed on the local system or a specific role
+ """
+
+ path_found = False
+ role_found = False
+ warnings = []
+ roles_search_paths = context.CLIARGS['roles_path']
+ role_name = context.CLIARGS['role']
+
+ for path in roles_search_paths:
+ role_path = GalaxyCLI._resolve_path(path)
+ if os.path.isdir(path):
+ path_found = True
+ else:
+ warnings.append("- the configured path {0} does not exist.".format(path))
+ continue
+
+ if role_name:
+ # show the requested role, if it exists
+ gr = GalaxyRole(self.galaxy, self.lazy_role_api, role_name, path=os.path.join(role_path, role_name))
+ if os.path.isdir(gr.path):
+ role_found = True
+ display.display('# %s' % os.path.dirname(gr.path))
+ _display_role(gr)
+ break
+ warnings.append("- the role %s was not found" % role_name)
+ else:
+ if not os.path.exists(role_path):
+ warnings.append("- the configured path %s does not exist." % role_path)
+ continue
+
+ if not os.path.isdir(role_path):
+ warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
+ continue
+
+ display.display('# %s' % role_path)
+ path_files = os.listdir(role_path)
+ for path_file in path_files:
+ gr = GalaxyRole(self.galaxy, self.lazy_role_api, path_file, path=path)
+ if gr.metadata:
+ _display_role(gr)
+
+ # Do not warn if the role was found in any of the search paths
+ if role_found and role_name:
+ warnings = []
+
+ for w in warnings:
+ display.warning(w)
+
+ if not path_found:
+ raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+
+ return 0
+
+ @with_collection_artifacts_manager
+ def execute_list_collection(self, artifacts_manager=None):
+ """
+ List all collections installed on the local system
+
+ :param artifacts_manager: Artifacts manager.
+ """
+ if artifacts_manager is not None:
+ artifacts_manager.require_build_metadata = False
+
+ output_format = context.CLIARGS['output_format']
+ collections_search_paths = set(context.CLIARGS['collections_path'])
+ collection_name = context.CLIARGS['collection']
+ default_collections_path = AnsibleCollectionConfig.collection_paths
+ collections_in_paths = {}
+
+ warnings = []
+ path_found = False
+ collection_found = False
+ for path in collections_search_paths:
+ collection_path = GalaxyCLI._resolve_path(path)
+ if not os.path.exists(path):
+ if path in default_collections_path:
+ # don't warn for missing default paths
+ continue
+ warnings.append("- the configured path {0} does not exist.".format(collection_path))
+ continue
+
+ if not os.path.isdir(collection_path):
+ warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
+ continue
+
+ path_found = True
+
+ if collection_name:
+ # list a specific collection
+
+ validate_collection_name(collection_name)
+ namespace, collection = collection_name.split('.')
+
+ collection_path = validate_collection_path(collection_path)
+ b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
+
+ if not os.path.exists(b_collection_path):
+ warnings.append("- unable to find {0} in collection paths".format(collection_name))
+ continue
+
+ if not os.path.isdir(collection_path):
+ warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
+ continue
+
+ collection_found = True
+
+ try:
+ collection = Requirement.from_dir_path_as_unknown(
+ b_collection_path,
+ artifacts_manager,
+ )
+ except ValueError as val_err:
+ six.raise_from(AnsibleError(val_err), val_err)
+
+ if output_format in {'yaml', 'json'}:
+ collections_in_paths[collection_path] = {
+ collection.fqcn: {'version': collection.ver}
+ }
+
+ continue
+
+ fqcn_width, version_width = _get_collection_widths([collection])
+
+ _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
+ _display_collection(collection, fqcn_width, version_width)
+
+            else:
+                # list all collections
+                collection_path = validate_collection_path(path)
+                if os.path.isdir(collection_path):
+                    display.vvv("Searching {0} for collections".format(collection_path))
+                    collections = list(find_existing_collections(
+                        collection_path, artifacts_manager,
+                    ))
+                else:
+                    # There was no 'ansible_collections/' directory in the path, so there
+                    # are no collections here.
+                    display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
+                    continue
+
+                if not collections:
+                    display.vvv("No collections found at {0}".format(collection_path))
+                    continue
+
+                if output_format in {'yaml', 'json'}:
+                    collections_in_paths[collection_path] = {
+                        collection.fqcn: {'version': collection.ver} for collection in collections
+                    }
+
+                    continue
+
+                # Display header
+                fqcn_width, version_width = _get_collection_widths(collections)
+                _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
+
+                # Sort collections by the namespace and name
+                for collection in sorted(collections, key=to_text):
+                    _display_collection(collection, fqcn_width, version_width)
+
+ # Do not warn if the specific collection was found in any of the search paths
+ if collection_found and collection_name:
+ warnings = []
+
+ for w in warnings:
+ display.warning(w)
+
+ if not path_found:
+ raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+
+ if output_format == 'json':
+ display.display(json.dumps(collections_in_paths))
+ elif output_format == 'yaml':
+ display.display(yaml_dump(collections_in_paths))
+
+ return 0
+
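+    # A sketch of the JSON shape emitted above (path, collection name and
+    # version are hypothetical examples):
+    #
+    #   {"/home/user/.ansible/collections/ansible_collections":
+    #       {"community.general": {"version": "6.0.0"}}}
+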
+ def execute_publish(self):
+ """
+ Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
+ """
+ collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
+ wait = context.CLIARGS['wait']
+ timeout = context.CLIARGS['import_timeout']
+
+ publish_collection(collection_path, self.api, wait, timeout)
+
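+    # An illustrative invocation of this subcommand (tarball name and API key
+    # are hypothetical); publish_collection() uploads the artifact and, when
+    # 'wait' is true, polls the Galaxy import task until it finishes or the
+    # timeout expires:
+    #
+    #   ansible-galaxy collection publish ./ns-col-1.0.0.tar.gz \
+    #       --api-key=SECRET --import-timeout=120
+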
+ def execute_search(self):
+ ''' searches for roles on the Ansible Galaxy server'''
+ page_size = 1000
+ search = None
+
+ if context.CLIARGS['args']:
+ search = '+'.join(context.CLIARGS['args'])
+
+ if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
+ raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
+
+ response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
+ tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
+
+ if response['count'] == 0:
+ display.display("No roles match your search.", color=C.COLOR_ERROR)
+ return 1
+
+ data = [u'']
+
+ if response['count'] > page_size:
+ data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
+ else:
+ data.append(u"Found %d roles matching your search:" % response['count'])
+
+ max_len = []
+ for role in response['results']:
+ max_len.append(len(role['username'] + '.' + role['name']))
+ name_len = max(max_len)
+ format_str = u" %%-%ds %%s" % name_len
+ data.append(u'')
+ data.append(format_str % (u"Name", u"Description"))
+ data.append(format_str % (u"----", u"-----------"))
+ for role in response['results']:
+ data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
+
+ data = u'\n'.join(data)
+ self.pager(data)
+
+ return 0
+
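+    # A minimal sketch of the column layout built above, assuming only the
+    # stdlib: the widest 'username.name' value sets the pad width of the
+    # first column.
+    #
+    #   >>> u" %%-%ds %%s" % 20
+    #   ' %-20s %s'
+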
+ def execute_import(self):
+ """ used to import a role into Ansible Galaxy """
+
+ colors = {
+ 'INFO': 'normal',
+ 'WARNING': C.COLOR_WARN,
+ 'ERROR': C.COLOR_ERROR,
+ 'SUCCESS': C.COLOR_OK,
+ 'FAILED': C.COLOR_ERROR,
+ }
+
+ github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
+ github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
+
+ if context.CLIARGS['check_status']:
+ task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
+ else:
+ # Submit an import request
+ task = self.api.create_import_task(github_user, github_repo,
+ reference=context.CLIARGS['reference'],
+ role_name=context.CLIARGS['role_name'])
+
+ if len(task) > 1:
+ # found multiple roles associated with github_user/github_repo
+ display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
+ color='yellow')
+ display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
+ for t in task:
+ display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
+ display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
+ color=C.COLOR_CHANGED)
+ return 0
+ # found a single role as expected
+ display.display("Successfully submitted import request %d" % task[0]['id'])
+ if not context.CLIARGS['wait']:
+ display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
+ display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
+
+ if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
+ # Get the status of the import
+ msg_list = []
+ finished = False
+ while not finished:
+ task = self.api.get_import_task(task_id=task[0]['id'])
+ for msg in task[0]['summary_fields']['task_messages']:
+ if msg['id'] not in msg_list:
+ display.display(msg['message_text'], color=colors[msg['message_type']])
+ msg_list.append(msg['id'])
+ if task[0]['state'] in ['SUCCESS', 'FAILED']:
+ finished = True
+ else:
+ time.sleep(10)
+
+ return 0
+
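+    # The wait logic above is a plain poll-until-terminal-state loop; the
+    # same idea in isolation (hypothetical 'api' client and 'task_id'):
+    #
+    #   while True:
+    #       task = api.get_import_task(task_id=task_id)
+    #       if task[0]['state'] in ('SUCCESS', 'FAILED'):
+    #           break
+    #       time.sleep(10)
+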
+ def execute_setup(self):
+ """ Setup an integration from Github or Travis for Ansible Galaxy roles"""
+
+ if context.CLIARGS['setup_list']:
+ # List existing integration secrets
+ secrets = self.api.list_secrets()
+ if len(secrets) == 0:
+ # None found
+ display.display("No integrations found.")
+ return 0
+ display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
+ display.display("---------- ---------- ----------", color=C.COLOR_OK)
+ for secret in secrets:
+ display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
+ secret['github_repo']), color=C.COLOR_OK)
+ return 0
+
+ if context.CLIARGS['remove_id']:
+ # Remove a secret
+ self.api.remove_secret(context.CLIARGS['remove_id'])
+            display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK)
+ return 0
+
+ source = context.CLIARGS['source']
+ github_user = context.CLIARGS['github_user']
+ github_repo = context.CLIARGS['github_repo']
+ secret = context.CLIARGS['secret']
+
+ resp = self.api.add_secret(source, github_user, github_repo, secret)
+ display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
+
+ return 0
+
+ def execute_delete(self):
+ """ Delete a role from Ansible Galaxy. """
+
+ github_user = context.CLIARGS['github_user']
+ github_repo = context.CLIARGS['github_repo']
+ resp = self.api.delete_role(github_user, github_repo)
+
+ if len(resp['deleted_roles']) > 1:
+ display.display("Deleted the following roles:")
+ display.display("ID User Name")
+ display.display("------ --------------- ----------")
+ for role in resp['deleted_roles']:
+ display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
+
+ display.display(resp['status'])
+
+ return 0
+
+
+def main(args=None):
+ GalaxyCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-inventory b/bin/ansible-inventory
new file mode 100755
index 0000000..e8ed75e
--- /dev/null
+++ b/bin/ansible-inventory
@@ -0,0 +1,417 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Brian Coca <bcoca@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import sys
+
+import argparse
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils.vars import combine_vars
+from ansible.utils.display import Display
+from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
+
+display = Display()
+
+INTERNAL_VARS = frozenset(['ansible_diff_mode',
+ 'ansible_config_file',
+ 'ansible_facts',
+ 'ansible_forks',
+ 'ansible_inventory_sources',
+ 'ansible_limit',
+ 'ansible_playbook_python',
+ 'ansible_run_tags',
+ 'ansible_skip_tags',
+ 'ansible_verbosity',
+ 'ansible_version',
+ 'inventory_dir',
+ 'inventory_file',
+ 'inventory_hostname',
+ 'inventory_hostname_short',
+ 'groups',
+ 'group_names',
+ 'omit',
+ 'playbook_dir', ])
+
+
+class InventoryCLI(CLI):
+ ''' used to display or dump the configured inventory as Ansible sees it '''
+
+ name = 'ansible-inventory'
+
+ ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
+ 'group': 'The name of a group in the inventory, relevant when using --graph', }
+
+ def __init__(self, args):
+
+ super(InventoryCLI, self).__init__(args)
+ self.vm = None
+ self.loader = None
+ self.inventory = None
+
+ def init_parser(self):
+ super(InventoryCLI, self).init_parser(
+            usage='%prog [options] [host|group]',
+            epilog='Show Ansible inventory information; by default it uses the inventory script JSON format')
+
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+
+ # remove unused default options
+ self.parser.add_argument('-l', '--limit', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument, nargs='?')
+ self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
+
+ self.parser.add_argument('args', metavar='host|group', nargs='?')
+
+ # Actions
+        action_group = self.parser.add_argument_group("Actions", "One of the following must be used on invocation, ONLY ONE!")
+ action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
+ action_group.add_argument("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
+ action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
+                                  help='create inventory graph; if supplying a pattern, it must be a valid group name')
+ self.parser.add_argument_group(action_group)
+
+ # graph
+ self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
+ help='Use YAML format instead of default JSON, ignored for --graph')
+ self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
+ help='Use TOML format instead of default JSON, ignored for --graph')
+ self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
+ help='Add vars to graph display, ignored unless used with --graph')
+
+ # list
+ self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
+                                 help="When doing --list, represent in a way that is optimized for export, "
+                                      "not as an accurate representation of how Ansible has processed it")
+ self.parser.add_argument('--output', default=None, dest='output_file',
+ help="When doing --list, send the inventory to a file instead of to the screen")
+ # self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
+ # help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
+
+ def post_process_args(self, options):
+ options = super(InventoryCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options)
+
+ # there can be only one! and, at least, one!
+ used = 0
+ for opt in (options.list, options.host, options.graph):
+ if opt:
+ used += 1
+ if used == 0:
+ raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
+ elif used > 1:
+ raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
+
+ # set host pattern to default if not supplied
+ if options.args:
+ options.pattern = options.args
+ else:
+ options.pattern = 'all'
+
+ return options
+
+ def run(self):
+
+ super(InventoryCLI, self).run()
+
+ # Initialize needed objects
+ self.loader, self.inventory, self.vm = self._play_prereqs()
+
+ results = None
+ if context.CLIARGS['host']:
+ hosts = self.inventory.get_hosts(context.CLIARGS['host'])
+ if len(hosts) != 1:
+ raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
+
+ myvars = self._get_host_variables(host=hosts[0])
+
+ # FIXME: should we template first?
+ results = self.dump(myvars)
+
+ elif context.CLIARGS['graph']:
+ results = self.inventory_graph()
+ elif context.CLIARGS['list']:
+ top = self._get_group('all')
+ if context.CLIARGS['yaml']:
+ results = self.yaml_inventory(top)
+ elif context.CLIARGS['toml']:
+ results = self.toml_inventory(top)
+ else:
+ results = self.json_inventory(top)
+ results = self.dump(results)
+
+ if results:
+ outfile = context.CLIARGS['output_file']
+ if outfile is None:
+ # FIXME: pager?
+ display.display(results)
+ else:
+ try:
+ with open(to_bytes(outfile), 'wb') as f:
+ f.write(to_bytes(results))
+ except (OSError, IOError) as e:
+ raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
+ sys.exit(0)
+
+ sys.exit(1)
+
+ @staticmethod
+ def dump(stuff):
+
+ if context.CLIARGS['yaml']:
+ import yaml
+ from ansible.parsing.yaml.dumper import AnsibleDumper
+ results = to_text(yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False, allow_unicode=True))
+ elif context.CLIARGS['toml']:
+ from ansible.plugins.inventory.toml import toml_dumps
+ try:
+ results = toml_dumps(stuff)
+ except TypeError as e:
+ raise AnsibleError(
+ 'The source inventory contains a value that cannot be represented in TOML: %s' % e
+ )
+ except KeyError as e:
+ raise AnsibleError(
+ 'The source inventory contains a non-string key (%s) which cannot be represented in TOML. '
+ 'The specified key will need to be converted to a string. Be aware that if your playbooks '
+ 'expect this key to be non-string, your playbooks will need to be modified to support this '
+ 'change.' % e.args[0]
+ )
+ else:
+ import json
+ from ansible.parsing.ajson import AnsibleJSONEncoder
+ try:
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True, ensure_ascii=False)
+ except TypeError as e:
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=False, indent=4, preprocess_unsafe=True, ensure_ascii=False)
+ display.warning("Could not sort JSON output due to issues while sorting keys: %s" % to_native(e))
+
+ return results
+
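+    # For example, with --yaml the dump above renders nested dicts in YAML
+    # block style (data hypothetical):
+    #
+    #   {'all': {'hosts': ['web1']}}   ->   all:
+    #                                         hosts:
+    #                                         - web1
+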
+ def _get_group_variables(self, group):
+
+ # get info from inventory source
+ res = group.get_vars()
+
+ # Always load vars plugins
+ res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
+ if context.CLIARGS['basedir']:
+ res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
+
+ if group.priority != 1:
+ res['ansible_group_priority'] = group.priority
+
+ return self._remove_internal(res)
+
+ def _get_host_variables(self, host):
+
+ if context.CLIARGS['export']:
+            # only get vars defined directly on the host
+ hostvars = host.get_vars()
+
+ # Always load vars plugins
+ hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
+ if context.CLIARGS['basedir']:
+ hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
+ else:
+ # get all vars flattened by host, but skip magic hostvars
+ hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
+
+ return self._remove_internal(hostvars)
+
+ def _get_group(self, gname):
+ group = self.inventory.groups.get(gname)
+ return group
+
+ @staticmethod
+ def _remove_internal(dump):
+
+ for internal in INTERNAL_VARS:
+ if internal in dump:
+ del dump[internal]
+
+ return dump
+
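+    # e.g. (hypothetical vars) _remove_internal({'ansible_version': '2.14',
+    # 'my_var': 1}) returns {'my_var': 1}, because 'ansible_version' is
+    # listed in INTERNAL_VARS.
+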
+ @staticmethod
+ def _remove_empty(dump):
+ # remove empty keys
+ for x in ('hosts', 'vars', 'children'):
+ if x in dump and not dump[x]:
+ del dump[x]
+
+ @staticmethod
+ def _show_vars(dump, depth):
+ result = []
+ for (name, val) in sorted(dump.items()):
+ result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
+ return result
+
+ @staticmethod
+ def _graph_name(name, depth=0):
+ if depth:
+ name = " |" * (depth) + "--%s" % name
+ return name
+
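+    # e.g. _graph_name('@all:') -> '@all:'
+    #      _graph_name('web1', depth=2) -> ' | |--web1'
+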
+ def _graph_group(self, group, depth=0):
+
+ result = [self._graph_name('@%s:' % group.name, depth)]
+ depth = depth + 1
+ for kid in group.child_groups:
+ result.extend(self._graph_group(kid, depth))
+
+ if group.name != 'all':
+ for host in group.hosts:
+ result.append(self._graph_name(host.name, depth))
+ if context.CLIARGS['show_vars']:
+ result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
+
+ if context.CLIARGS['show_vars']:
+ result.extend(self._show_vars(self._get_group_variables(group), depth))
+
+ return result
+
+ def inventory_graph(self):
+
+ start_at = self._get_group(context.CLIARGS['pattern'])
+ if start_at:
+ return '\n'.join(self._graph_group(start_at))
+ else:
+ raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
+
+ def json_inventory(self, top):
+
+ seen = set()
+
+ def format_group(group):
+ results = {}
+ results[group.name] = {}
+ if group.name != 'all':
+ results[group.name]['hosts'] = [h.name for h in group.hosts]
+ results[group.name]['children'] = []
+ for subgroup in group.child_groups:
+ results[group.name]['children'].append(subgroup.name)
+ if subgroup.name not in seen:
+ results.update(format_group(subgroup))
+ seen.add(subgroup.name)
+ if context.CLIARGS['export']:
+ results[group.name]['vars'] = self._get_group_variables(group)
+
+ self._remove_empty(results[group.name])
+ if not results[group.name]:
+ del results[group.name]
+
+ return results
+
+ results = format_group(top)
+
+ # populate meta
+ results['_meta'] = {'hostvars': {}}
+ hosts = self.inventory.get_hosts()
+ for host in hosts:
+ hvars = self._get_host_variables(host)
+ if hvars:
+ results['_meta']['hostvars'][host.name] = hvars
+
+ return results
+
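+    # The returned structure follows the external inventory-script contract,
+    # e.g. for a hypothetical inventory with a single 'web' group:
+    #
+    #   {"all": {"children": ["ungrouped", "web"]},
+    #    "web": {"hosts": ["web1"]},
+    #    "_meta": {"hostvars": {"web1": {"ansible_host": "10.0.0.5"}}}}
+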
+ def yaml_inventory(self, top):
+
+ seen = []
+
+ def format_group(group):
+ results = {}
+
+ # initialize group + vars
+ results[group.name] = {}
+
+ # subgroups
+ results[group.name]['children'] = {}
+ for subgroup in group.child_groups:
+ if subgroup.name != 'all':
+ results[group.name]['children'].update(format_group(subgroup))
+
+ # hosts for group
+ results[group.name]['hosts'] = {}
+ if group.name != 'all':
+ for h in group.hosts:
+ myvars = {}
+ if h.name not in seen: # avoid defining host vars more than once
+ seen.append(h.name)
+ myvars = self._get_host_variables(host=h)
+ results[group.name]['hosts'][h.name] = myvars
+
+ if context.CLIARGS['export']:
+ gvars = self._get_group_variables(group)
+ if gvars:
+ results[group.name]['vars'] = gvars
+
+ self._remove_empty(results[group.name])
+
+ return results
+
+ return format_group(top)
+
+ def toml_inventory(self, top):
+ seen = set()
+ has_ungrouped = bool(next(g.hosts for g in top.child_groups if g.name == 'ungrouped'))
+
+ def format_group(group):
+ results = {}
+ results[group.name] = {}
+
+ results[group.name]['children'] = []
+ for subgroup in group.child_groups:
+ if subgroup.name == 'ungrouped' and not has_ungrouped:
+ continue
+ if group.name != 'all':
+ results[group.name]['children'].append(subgroup.name)
+                results.update(format_group(subgroup))
+
+ if group.name != 'all':
+ for host in group.hosts:
+ if host.name not in seen:
+ seen.add(host.name)
+ host_vars = self._get_host_variables(host=host)
+ else:
+ host_vars = {}
+ try:
+ results[group.name]['hosts'][host.name] = host_vars
+ except KeyError:
+ results[group.name]['hosts'] = {host.name: host_vars}
+
+ if context.CLIARGS['export']:
+ results[group.name]['vars'] = self._get_group_variables(group)
+
+ self._remove_empty(results[group.name])
+ if not results[group.name]:
+ del results[group.name]
+
+ return results
+
+ results = format_group(top)
+
+ return results
+
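+    # Rendered through toml_dumps(), the same hypothetical inventory becomes:
+    #
+    #   [web.hosts.web1]
+    #   ansible_host = "10.0.0.5"
+    #
+    # (without --export, 'all' records no hosts, children or vars of its own
+    # above, so _remove_empty() empties it and it is dropped from the output)
+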
+
+def main(args=None):
+ InventoryCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
new file mode 100755
index 0000000..c94cf0f
--- /dev/null
+++ b/bin/ansible-playbook
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import os
+import stat
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError
+from ansible.executor.playbook_executor import PlaybookExecutor
+from ansible.module_utils._text import to_bytes
+from ansible.playbook.block import Block
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class PlaybookCLI(CLI):
+ ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
+ See the project home page (https://docs.ansible.com) for more information. '''
+
+ name = 'ansible-playbook'
+
+ def init_parser(self):
+
+ # create parser for CLI options
+ super(PlaybookCLI, self).init_parser(
+ usage="%prog [options] playbook.yml [playbook2 ...]",
+ desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")
+
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_meta_options(self.parser)
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_subset_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+
+ # ansible playbook specific opts
+ self.parser.add_argument('--list-tasks', dest='listtasks', action='store_true',
+ help="list all tasks that would be executed")
+ self.parser.add_argument('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
+ self.parser.add_argument('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+ self.parser.add_argument('--start-at-task', dest='start_at_task',
+ help="start the playbook at the task matching this name")
+ self.parser.add_argument('args', help='Playbook(s)', metavar='playbook', nargs='+')
+
+ def post_process_args(self, options):
+ options = super(PlaybookCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+
+ return options
+
+ def run(self):
+
+ super(PlaybookCLI, self).run()
+
+ # Note: slightly wrong, this is written so that implicit localhost
+ # manages passwords
+ sshpass = None
+ becomepass = None
+ passwords = {}
+
+ # initial error check, to make sure all specified playbooks are accessible
+ # before we start running anything through the playbook executor
+ # also prep plugin paths
+ b_playbook_dirs = []
+ for playbook in context.CLIARGS['args']:
+
+            # resolve it if it is a collection playbook in FQCN notation; if not, leave it unchanged
+ resource = _get_collection_playbook_path(playbook)
+ if resource is not None:
+ playbook_collection = resource[2]
+ else:
+ # not an FQCN so must be a file
+ if not os.path.exists(playbook):
+ raise AnsibleError("the playbook: %s could not be found" % playbook)
+ if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+ raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+ # check if playbook is from collection (path can be passed directly)
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ # don't add collection playbooks to adjacency search path
+ if not playbook_collection:
+ # setup dirs to enable loading plugins from all playbooks in case they add callbacks/inventory/etc
+ b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
+ add_all_plugin_dirs(b_playbook_dir)
+ b_playbook_dirs.append(b_playbook_dir)
+
+ if b_playbook_dirs:
+ # allow collections adjacent to these playbooks
+ # we use list copy to avoid opening up 'adjacency' in the previous loop
+ AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
+
+ # don't deal with privilege escalation or passwords when we don't need to
+ if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
+ context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
+ (sshpass, becomepass) = self.ask_passwords()
+ passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ # create base objects
+ loader, inventory, variable_manager = self._play_prereqs()
+
+        # The implicit localhost (which is not returned in list_hosts()) is taken into account for
+        # warning if inventory is empty. But it can't be taken into account for
+        # checking if limit doesn't match any hosts. Instead we don't worry about
+        # limit if only implicit localhost was in inventory to start with.
+        #
+        # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
+ CLI.get_host_list(inventory, context.CLIARGS['subset'])
+
+ # flush fact cache if requested
+ if context.CLIARGS['flush_cache']:
+ self._flush_cache(inventory, variable_manager)
+
+ # create the playbook executor, which manages running the plays via a task queue manager
+ pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
+ variable_manager=variable_manager, loader=loader,
+ passwords=passwords)
+
+ results = pbex.run()
+
+ if isinstance(results, list):
+ for p in results:
+
+ display.display('\nplaybook: %s' % p['playbook'])
+ for idx, play in enumerate(p['plays']):
+ if play._included_path is not None:
+ loader.set_basedir(play._included_path)
+ else:
+ pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
+ loader.set_basedir(pb_dir)
+
+ # show host list if we were able to template into a list
+ try:
+ host_list = ','.join(play.hosts)
+ except TypeError:
+ host_list = ''
+
+ msg = "\n play #%d (%s): %s" % (idx + 1, host_list, play.name)
+ mytags = set(play.tags)
+ msg += '\tTAGS: [%s]' % (','.join(mytags))
+
+ if context.CLIARGS['listhosts']:
+ playhosts = set(inventory.get_hosts(play.hosts))
+ msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
+ for host in playhosts:
+ msg += "\n %s" % host
+
+ display.display(msg)
+
+ all_tags = set()
+ if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
+ taskmsg = ''
+ if context.CLIARGS['listtasks']:
+ taskmsg = ' tasks:\n'
+
+ def _process_block(b):
+ taskmsg = ''
+ for task in b.block:
+ if isinstance(task, Block):
+ taskmsg += _process_block(task)
+ else:
+ if task.action in C._ACTION_META and task.implicit:
+ continue
+
+ all_tags.update(task.tags)
+ if context.CLIARGS['listtasks']:
+ cur_tags = list(mytags.union(set(task.tags)))
+ cur_tags.sort()
+ if task.name:
+ taskmsg += " %s" % task.get_name()
+ else:
+ taskmsg += " %s" % task.action
+ taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
+
+ return taskmsg
+
+ all_vars = variable_manager.get_vars(play=play)
+ for block in play.compile():
+ block = block.filter_tagged_tasks(all_vars)
+ if not block.has_tasks():
+ continue
+ taskmsg += _process_block(block)
+
+ if context.CLIARGS['listtags']:
+ cur_tags = list(mytags.union(all_tags))
+ cur_tags.sort()
+ taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
+
+ display.display(taskmsg)
+
+ return 0
+ else:
+ return results
+
+ @staticmethod
+ def _flush_cache(inventory, variable_manager):
+ for host in inventory.list_hosts():
+ hostname = host.get_name()
+ variable_manager.clear_facts(hostname)
+
+
+def main(args=None):
+ PlaybookCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-pull b/bin/ansible-pull
new file mode 100755
index 0000000..dc8f055
--- /dev/null
+++ b/bin/ansible-pull
@@ -0,0 +1,364 @@
+#!/usr/bin/env python
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import datetime
+import os
+import platform
+import random
+import shlex
+import shutil
+import socket
+import sys
+import time
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.loader import module_loader
+from ansible.utils.cmd_functions import run_cmd
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class PullCLI(CLI):
+ ''' Used to pull a remote copy of ansible on each managed node,
+ each set to run via cron and update playbook source via a source repository.
+ This inverts the default *push* architecture of ansible into a *pull* architecture,
+ which has near-limitless scaling potential.
+
+    None of the CLI tools are designed to run concurrently with themselves;
+    you should use an external scheduler and/or locking to ensure there are no clashing operations.
+
+ The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
+ This is useful both for extreme scale-out as well as periodic remediation.
+ Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an
+ excellent way to gather and analyze remote logs from ansible-pull.
+ '''
+
+ name = 'ansible-pull'
+
+ DEFAULT_REPO_TYPE = 'git'
+ DEFAULT_PLAYBOOK = 'local.yml'
+ REPO_CHOICES = ('git', 'subversion', 'hg', 'bzr')
+ PLAYBOOK_ERRORS = {
+ 1: 'File does not exist',
+ 2: 'File is not readable',
+ }
+    ARGUMENTS = {'playbook.yml': 'The name of one of the YAML format files to run as an Ansible playbook. '
+                 'This can be a relative path within the checkout. By default, Ansible will '
+                 "look for a playbook based on the host's fully-qualified domain name, "
+                 'then on the host hostname, and finally a playbook named *local.yml*.', }
+
+ SKIP_INVENTORY_DEFAULTS = True
+
+ @staticmethod
+ def _get_inv_cli():
+ inv_opts = ''
+ if context.CLIARGS.get('inventory', False):
+ for inv in context.CLIARGS['inventory']:
+ if isinstance(inv, list):
+ inv_opts += " -i '%s' " % ','.join(inv)
+ elif ',' in inv or os.path.exists(inv):
+ inv_opts += ' -i %s ' % inv
+
+ return inv_opts
+
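+    # e.g. with context.CLIARGS['inventory'] == ['hosts.ini', 'web1,web2']
+    # (hypothetical sources, hosts.ini assumed to exist on disk) this returns
+    # " -i hosts.ini  -i web1,web2 ": one '-i' per existing file or
+    # comma-separated host list.
+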
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+
+ super(PullCLI, self).init_parser(
+ usage='%prog -U <repository> [options] [<playbook.yml>]',
+ desc="pulls playbooks from a VCS repo and executes them for the local host")
+
+ # Do not add check_options as there's a conflict with --checkout/-C
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_subset_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_runas_prompt_options(self.parser)
+
+ self.parser.add_argument('args', help='Playbook(s)', metavar='playbook.yml', nargs='*')
+
+ # options unique to pull
+ self.parser.add_argument('--purge', default=False, action='store_true', help='purge checkout after playbook run')
+ self.parser.add_argument('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
+ help='only run the playbook if the repository has been updated')
+ self.parser.add_argument('-s', '--sleep', dest='sleep', default=None,
+                                 help='sleep for a random interval (between 0 and n seconds) before starting. '
+                                      'This is a useful way to disperse git requests')
+ self.parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
+ help='run the playbook even if the repository could not be updated')
+ self.parser.add_argument('-d', '--directory', dest='dest', default=None,
+ help='absolute path of repository checkout directory (relative paths are not supported)')
+ self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
+ self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
+ self.parser.add_argument('-C', '--checkout', dest='checkout',
+ help='branch/tag/commit to checkout. Defaults to behavior of repository module.')
+ self.parser.add_argument('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
+ help='adds the hostkey for the repo url if not already added')
+ self.parser.add_argument('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
+ help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.'
+ % (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE))
+ self.parser.add_argument('--verify-commit', dest='verify', default=False, action='store_true',
+ help='verify GPG signature of checked out commit, if it fails abort running the playbook. '
+ 'This needs the corresponding VCS module to support such an operation')
+ self.parser.add_argument('--clean', dest='clean', default=False, action='store_true',
+ help='modified files in the working repository will be discarded')
+ self.parser.add_argument('--track-subs', dest='tracksubs', default=False, action='store_true',
+ help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update')
+ # add a subset of the check_opts flag group manually, as the full set's
+ # shortcodes conflict with above --checkout/-C
+ self.parser.add_argument("--check", default=False, dest='check', action='store_true',
+ help="don't make any changes; instead, try to predict some of the changes that may occur")
+ self.parser.add_argument("--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
+ help="when changing (small) files and templates, show the differences in those files; works great with --check")
+
+ def post_process_args(self, options):
+ options = super(PullCLI, self).post_process_args(options)
+
+ if not options.dest:
+ hostname = socket.getfqdn()
+            # use a hostname-dependent directory, in case $HOME is on NFS
+ options.dest = os.path.join(C.ANSIBLE_HOME, 'pull', hostname)
+ options.dest = os.path.expandvars(os.path.expanduser(options.dest))
+
+ if os.path.exists(options.dest) and not os.path.isdir(options.dest):
+ raise AnsibleOptionsError("%s is not a valid or accessible directory." % options.dest)
+
+ if options.sleep:
+ try:
+ secs = random.randint(0, int(options.sleep))
+ options.sleep = secs
+ except ValueError:
+ raise AnsibleOptionsError("%s is not a number." % options.sleep)
+
+ if not options.url:
+ raise AnsibleOptionsError("URL for repository not specified, use -h for help")
+
+ if options.module_name not in self.REPO_CHOICES:
+ raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (options.module_name, ','.join(self.REPO_CHOICES)))
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options)
+
+ return options
+
+ def run(self):
+        ''' check out the playbook repository, then run the playbook against the local host '''
+
+ super(PullCLI, self).run()
+
+ # log command line
+ now = datetime.datetime.now()
+ display.display(now.strftime("Starting Ansible Pull at %F %T"))
+ display.display(' '.join(sys.argv))
+
+ # Build Checkout command
+ # Now construct the ansible command
+ node = platform.node()
+ host = socket.getfqdn()
+ hostnames = ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
+ if hostnames:
+ limit_opts = 'localhost,%s,127.0.0.1' % hostnames
+ else:
+ limit_opts = 'localhost,127.0.0.1'
+ base_opts = '-c local '
+ if context.CLIARGS['verbosity'] > 0:
+ base_opts += ' -%s' % ''.join(["v" for x in range(0, context.CLIARGS['verbosity'])])
+
+ # Attempt to use the inventory passed in as an argument
+ # It might not yet have been downloaded so use localhost as default
+ inv_opts = self._get_inv_cli()
+ if not inv_opts:
+ inv_opts = " -i localhost, "
+ # avoid interpreter discovery since we already know which interpreter to use on localhost
+ inv_opts += '-e %s ' % shlex.quote('ansible_python_interpreter=%s' % sys.executable)
+
+ # SCM specific options
+ if context.CLIARGS['module_name'] == 'git':
+ repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' version=%s' % context.CLIARGS['checkout']
+
+ if context.CLIARGS['accept_host_key']:
+ repo_opts += ' accept_hostkey=yes'
+
+ if context.CLIARGS['private_key_file']:
+ repo_opts += ' key_file=%s' % context.CLIARGS['private_key_file']
+
+ if context.CLIARGS['verify']:
+ repo_opts += ' verify_commit=yes'
+
+ if context.CLIARGS['tracksubs']:
+ repo_opts += ' track_submodules=yes'
+
+ if not context.CLIARGS['fullclone']:
+ repo_opts += ' depth=1'
+ elif context.CLIARGS['module_name'] == 'subversion':
+ repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' revision=%s' % context.CLIARGS['checkout']
+ if not context.CLIARGS['fullclone']:
+ repo_opts += ' export=yes'
+ elif context.CLIARGS['module_name'] == 'hg':
+ repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' revision=%s' % context.CLIARGS['checkout']
+ elif context.CLIARGS['module_name'] == 'bzr':
+ repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' version=%s' % context.CLIARGS['checkout']
+ else:
+ raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s'
+ % (context.CLIARGS['module_name'],
+ ','.join(self.REPO_CHOICES)))
+
+ # options common to all supported SCMS
+ if context.CLIARGS['clean']:
+ repo_opts += ' force=yes'
+
+ path = module_loader.find_plugin(context.CLIARGS['module_name'])
+ if path is None:
+ raise AnsibleOptionsError(("module '%s' not found.\n" % context.CLIARGS['module_name']))
+
+ bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ # hardcode local and inventory/host as this is just meant to fetch the repo
+ cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts,
+ context.CLIARGS['module_name'],
+ repo_opts, limit_opts)
+ for ev in context.CLIARGS['extra_vars']:
+ cmd += ' -e %s' % shlex.quote(ev)
+
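+        # At this point cmd looks roughly like the following (URL, DEST and
+        # the host names are hypothetical placeholders):
+        #   /usr/bin/ansible -i localhost, -e 'ansible_python_interpreter=/usr/bin/python3' \
+        #       -c local -m git -a "name=URL dest=DEST depth=1" all -l "localhost,myhost,127.0.0.1"
+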
+ # Nap?
+ if context.CLIARGS['sleep']:
+ display.display("Sleeping for %d seconds..." % context.CLIARGS['sleep'])
+ time.sleep(context.CLIARGS['sleep'])
+
+ # RUN the Checkout command
+ display.debug("running ansible with VCS module to checkout repo")
+ display.vvvv('EXEC: %s' % cmd)
+ rc, b_out, b_err = run_cmd(cmd, live=True)
+
+ if rc != 0:
+ if context.CLIARGS['force']:
+ display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
+ else:
+ return rc
+ elif context.CLIARGS['ifchanged'] and b'"changed": true' not in b_out:
+ display.display("Repository has not changed, quitting.")
+ return 0
+
+ playbook = self.select_playbook(context.CLIARGS['dest'])
+ if playbook is None:
+ raise AnsibleOptionsError("Could not find a playbook to run.")
+
+ # Build playbook command
+ cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
+ if context.CLIARGS['vault_password_files']:
+ for vault_password_file in context.CLIARGS['vault_password_files']:
+ cmd += " --vault-password-file=%s" % vault_password_file
+ if context.CLIARGS['vault_ids']:
+ for vault_id in context.CLIARGS['vault_ids']:
+ cmd += " --vault-id=%s" % vault_id
+
+ for ev in context.CLIARGS['extra_vars']:
+ cmd += ' -e %s' % shlex.quote(ev)
+ if context.CLIARGS['become_ask_pass']:
+ cmd += ' --ask-become-pass'
+ if context.CLIARGS['skip_tags']:
+ cmd += ' --skip-tags "%s"' % to_native(u','.join(context.CLIARGS['skip_tags']))
+ if context.CLIARGS['tags']:
+ cmd += ' -t "%s"' % to_native(u','.join(context.CLIARGS['tags']))
+ if context.CLIARGS['subset']:
+ cmd += ' -l "%s"' % context.CLIARGS['subset']
+ else:
+ cmd += ' -l "%s"' % limit_opts
+ if context.CLIARGS['check']:
+ cmd += ' -C'
+ if context.CLIARGS['diff']:
+ cmd += ' -D'
+
+ os.chdir(context.CLIARGS['dest'])
+
+ # redo inventory options as new files might exist now
+ inv_opts = self._get_inv_cli()
+ if inv_opts:
+ cmd += inv_opts
+
+ # RUN THE PLAYBOOK COMMAND
+ display.debug("running ansible-playbook to do actual work")
+ display.debug('EXEC: %s' % cmd)
+ rc, b_out, b_err = run_cmd(cmd, live=True)
+
+ if context.CLIARGS['purge']:
+ os.chdir('/')
+ try:
+ shutil.rmtree(context.CLIARGS['dest'])
+ except Exception as e:
+ display.error(u"Failed to remove %s: %s" % (context.CLIARGS['dest'], to_text(e)))
+
+ return rc
+
+ @staticmethod
+ def try_playbook(path):
+ if not os.path.exists(path):
+ return 1
+ if not os.access(path, os.R_OK):
+ return 2
+ return 0
+
+ @staticmethod
+ def select_playbook(path):
+ playbook = None
+ errors = []
+ if context.CLIARGS['args'] and context.CLIARGS['args'][0] is not None:
+ playbooks = []
+ for book in context.CLIARGS['args']:
+ book_path = os.path.join(path, book)
+ rc = PullCLI.try_playbook(book_path)
+ if rc != 0:
+ errors.append("%s: %s" % (book_path, PullCLI.PLAYBOOK_ERRORS[rc]))
+ continue
+ playbooks.append(book_path)
+ if 0 < len(errors):
+ display.warning("\n".join(errors))
+ elif len(playbooks) == len(context.CLIARGS['args']):
+ playbook = " ".join(playbooks)
+ return playbook
+ else:
+ fqdn = socket.getfqdn()
+ hostpb = os.path.join(path, fqdn + '.yml')
+ shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
+ localpb = os.path.join(path, PullCLI.DEFAULT_PLAYBOOK)
+ for pb in [hostpb, shorthostpb, localpb]:
+ rc = PullCLI.try_playbook(pb)
+ if rc == 0:
+ playbook = pb
+ break
+ else:
+ errors.append("%s: %s" % (pb, PullCLI.PLAYBOOK_ERRORS[rc]))
+ if playbook is None:
+ display.warning("\n".join(errors))
+ return playbook
+
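+    # Search order when no playbook is named on the command line, for a
+    # hypothetical fqdn of 'web1.example.com':
+    #
+    #   1. web1.example.com.yml
+    #   2. web1.yml
+    #   3. local.yml   (DEFAULT_PLAYBOOK)
+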
+
+def main(args=None):
+ PullCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-test b/bin/ansible-test
new file mode 100755
index 0000000..930654f
--- /dev/null
+++ b/bin/ansible-test
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# PYTHON_ARGCOMPLETE_OK
+"""Command line entry point for ansible-test."""
+
+# NOTE: This file resides in the _util/target directory to ensure compatibility with all supported Python versions.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main(args=None):
+ """Main program entry point."""
+ ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ source_root = os.path.join(ansible_root, 'test', 'lib')
+
+ if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')):
+ # running from source, use that version of ansible-test instead of any version that may already be installed
+ sys.path.insert(0, source_root)
+
+ # noinspection PyProtectedMember
+ from ansible_test._util.target.common.constants import CONTROLLER_PYTHON_VERSIONS
+
+ if version_to_str(sys.version_info[:2]) not in CONTROLLER_PYTHON_VERSIONS:
+ raise SystemExit('This version of ansible-test cannot be executed with Python version %s. Supported Python versions are: %s' % (
+ version_to_str(sys.version_info[:3]), ', '.join(CONTROLLER_PYTHON_VERSIONS)))
+
+ if any(not os.get_blocking(handle.fileno()) for handle in (sys.stdin, sys.stdout, sys.stderr)):
+ raise SystemExit('Standard input, output and error file handles must be blocking to run ansible-test.')
+
+ # noinspection PyProtectedMember
+ from ansible_test._internal import main as cli_main
+
+ cli_main(args)
+
+
+def version_to_str(version):
+ """Return a version string from a version tuple."""
+ return '.'.join(str(n) for n in version)
+
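+# e.g. version_to_str((3, 10)) -> '3.10', version_to_str((3, 10, 6)) -> '3.10.6'
+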
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/ansible-vault b/bin/ansible-vault
new file mode 100755
index 0000000..3e60329
--- /dev/null
+++ b/bin/ansible-vault
@@ -0,0 +1,480 @@
+#!/usr/bin/env python
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class VaultCLI(CLI):
+ ''' can encrypt any structured data file used by Ansible.
+ This can include *group_vars/* or *host_vars/* inventory variables,
+ variables loaded by *include_vars* or *vars_files*, or variable files
+ passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
+ Role variables and defaults are also included!
+
+ Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
+ If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
+ '''
+
+ name = 'ansible-vault'
+
+ FROM_STDIN = "stdin"
+ FROM_ARGS = "the command line args"
+ FROM_PROMPT = "the interactive prompt"
+
+ def __init__(self, args):
+
+ self.b_vault_pass = None
+ self.b_new_vault_pass = None
+ self.encrypt_string_read_stdin = False
+
+ self.encrypt_secret = None
+ self.encrypt_vault_id = None
+ self.new_encrypt_secret = None
+ self.new_encrypt_vault_id = None
+
+ super(VaultCLI, self).__init__(args)
+
+ def init_parser(self):
+ super(VaultCLI, self).init_parser(
+ desc="encryption/decryption utility for Ansible data files",
+ epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
+ )
+
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ opt_help.add_vault_options(common)
+ opt_help.add_verbosity_options(common)
+
+ subparsers = self.parser.add_subparsers(dest='action')
+ subparsers.required = True
+
+ output = opt_help.argparse.ArgumentParser(add_help=False)
+ output.add_argument('--output', default=None, dest='output_file',
+ help='output file name for encrypt or decrypt; use - for stdout',
+ type=opt_help.unfrack_path())
+
+ # For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
+ vault_id = opt_help.argparse.ArgumentParser(add_help=False)
+ vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
+ action='store', type=str,
+ help='the vault id used to encrypt (required if more than one vault-id is provided)')
+
+ create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
+ create_parser.set_defaults(func=self.execute_create)
+ create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
+ decrypt_parser.set_defaults(func=self.execute_decrypt)
+ decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
+ edit_parser.set_defaults(func=self.execute_edit)
+ edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
+ view_parser.set_defaults(func=self.execute_view)
+ view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
+ encrypt_parser.set_defaults(func=self.execute_encrypt)
+ encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
+ enc_str_parser.set_defaults(func=self.execute_encrypt_string)
+ enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
+ enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
+ action='store_true',
+ help="Prompt for the string to encrypt")
+ enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
+ help='Do not hide input when prompted for the string to encrypt')
+ enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
+ action='append',
+ help="Specify the variable name")
+ enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
+ default=None,
+ help="Specify the variable name for stdin")
+
+ rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
+ rekey_parser.set_defaults(func=self.execute_rekey)
+ rekey_new_group = rekey_parser.add_mutually_exclusive_group()
+ rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
+ help="new vault password file for rekey", type=opt_help.unfrack_path())
+ rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
+ help='the new vault identity to use for rekey')
+ rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ def post_process_args(self, options):
+ options = super(VaultCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+
+ if options.vault_ids:
+ for vault_id in options.vault_ids:
+ if u';' in vault_id:
+ raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
+
+ if getattr(options, 'output_file', None) and len(options.args) > 1:
+ raise AnsibleOptionsError("At most one input file may be used with the --output option")
+
+ if options.action == 'encrypt_string':
+ if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
+ self.encrypt_string_read_stdin = True
+
+ # TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
+ if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
+ raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
+
+ return options
+
+ def run(self):
+ super(VaultCLI, self).run()
+ loader = DataLoader()
+
+ # set default restrictive umask
+ old_umask = os.umask(0o077)
+
+ vault_ids = list(context.CLIARGS['vault_ids'])
+
+        # there are 3 types of actions: those that just 'read' (decrypt, view) and only
+        # need to ask for a password once, those that 'write' (create, encrypt) that
+        # ask for a new password and confirm it, and 'read/write' (rekey) that asks for the
+        # old password, then asks for a new one and confirms it.
+
+ default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
+ vault_ids = default_vault_ids + vault_ids
+
+ action = context.CLIARGS['action']
+
+ # TODO: instead of prompting for these before, we could let VaultEditor
+ # call a callback when it needs it.
+ if action in ['decrypt', 'view', 'rekey', 'edit']:
+ vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
+ vault_password_files=list(context.CLIARGS['vault_password_files']),
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'])
+ if not vault_secrets:
+ raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
+
+ if action in ['encrypt', 'encrypt_string', 'create']:
+
+ encrypt_vault_id = None
+            # there is no --encrypt-vault-id (context.CLIARGS['encrypt_vault_id']) for 'edit'
+ if action not in ['edit']:
+ encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
+
+ vault_secrets = None
+ vault_secrets = \
+ self.setup_vault_secrets(loader,
+ vault_ids=vault_ids,
+ vault_password_files=list(context.CLIARGS['vault_password_files']),
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ create_new_password=True)
+
+ if len(vault_secrets) > 1 and not encrypt_vault_id:
+ raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
+ ','.join([x[0] for x in vault_secrets]))
+
+ if not vault_secrets:
+ raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
+
+ encrypt_secret = match_encrypt_secret(vault_secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+ # only one secret for encrypt for now, use the first vault_id and use its first secret
+ # TODO: exception if more than one?
+ self.encrypt_vault_id = encrypt_secret[0]
+ self.encrypt_secret = encrypt_secret[1]
+
+ if action in ['rekey']:
+ encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
+ # print('encrypt_vault_id: %s' % encrypt_vault_id)
+ # print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
+
+            # new_vault_ids should only ever be one item; load the default
+            # vault ids if we are using --encrypt-vault-id
+ new_vault_ids = []
+ if encrypt_vault_id:
+ new_vault_ids = default_vault_ids
+ if context.CLIARGS['new_vault_id']:
+ new_vault_ids.append(context.CLIARGS['new_vault_id'])
+
+ new_vault_password_files = []
+ if context.CLIARGS['new_vault_password_file']:
+ new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
+
+ new_vault_secrets = \
+ self.setup_vault_secrets(loader,
+ vault_ids=new_vault_ids,
+ vault_password_files=new_vault_password_files,
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ create_new_password=True)
+
+ if not new_vault_secrets:
+ raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
+
+ # There is only one new_vault_id currently and one new_vault_secret, or we
+ # use the id specified in --encrypt-vault-id
+ new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+ self.new_encrypt_vault_id = new_encrypt_secret[0]
+ self.new_encrypt_secret = new_encrypt_secret[1]
+
+ loader.set_vault_secrets(vault_secrets)
+
+        # FIXME: do we need to create VaultEditor here? it's not reused
+ vault = VaultLib(vault_secrets)
+ self.editor = VaultEditor(vault)
+
+ context.CLIARGS['func']()
+
+ # and restore umask
+ os.umask(old_umask)
+
+ def execute_encrypt(self):
+ ''' encrypt the supplied file using the provided vault secret '''
+
+ if not context.CLIARGS['args'] and sys.stdin.isatty():
+ display.display("Reading plaintext input from stdin", stderr=True)
+
+ for f in context.CLIARGS['args'] or ['-']:
+            # FIXME: use the correct vault secret for each file
+ self.editor.encrypt_file(f, self.encrypt_secret,
+ vault_id=self.encrypt_vault_id,
+ output_file=context.CLIARGS['output_file'])
+
+ if sys.stdout.isatty():
+ display.display("Encryption successful", stderr=True)
+
+ @staticmethod
+ def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
+ indent = indent or 10
+
+ block_format_var_name = ""
+ if name:
+ block_format_var_name = "%s: " % name
+
+ block_format_header = "%s!vault |" % block_format_var_name
+ lines = []
+ vault_ciphertext = to_text(b_ciphertext)
+
+ lines.append(block_format_header)
+ for line in vault_ciphertext.splitlines():
+ lines.append('%s%s' % (' ' * indent, line))
+
+ yaml_ciphertext = '\n'.join(lines)
+ return yaml_ciphertext
+
+ def execute_encrypt_string(self):
+ ''' encrypt the supplied string using the provided vault secret '''
+ b_plaintext = None
+
+        # Holds tuples of (the text, the source of the string, the variable name if one is provided).
+ b_plaintext_list = []
+
+ # remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
+ # we don't add it to the plaintext list
+ args = [x for x in context.CLIARGS['args'] if x != '-']
+
+ # We can prompt and read input, or read from stdin, but not both.
+ if context.CLIARGS['encrypt_string_prompt']:
+
+ name = None
+ name_prompt_response = display.prompt('Variable name (enter for no name): ')
+
+ # TODO: enforce var naming rules?
+ if name_prompt_response != "":
+ name = name_prompt_response
+
+ # TODO: could prompt for which vault_id to use for each plaintext string
+ # currently, it will just be the default
+ hide_input = not context.CLIARGS['show_string_input']
+ if hide_input:
+ msg = "String to encrypt (hidden): "
+ else:
+                msg = "String to encrypt: "
+
+ prompt_response = display.prompt(msg, private=hide_input)
+
+ if prompt_response == '':
+ raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
+
+ b_plaintext = to_bytes(prompt_response)
+ b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
+
+ # read from stdin
+ if self.encrypt_string_read_stdin:
+ if sys.stdout.isatty():
+ display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
+
+ stdin_text = sys.stdin.read()
+ if stdin_text == '':
+ raise AnsibleOptionsError('stdin was empty, not encrypting')
+
+ if sys.stdout.isatty() and not stdin_text.endswith("\n"):
+ display.display("\n")
+
+ b_plaintext = to_bytes(stdin_text)
+
+ # defaults to None
+ name = context.CLIARGS['encrypt_string_stdin_name']
+ b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
+
+ # use any leftover args as strings to encrypt
+ # Try to match args up to --name options
+ if context.CLIARGS.get('encrypt_string_names', False):
+ name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
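+            # zip() stops at the shorter sequence, so any extra unnamed args fall
+            # through to the length check below.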
+
+            # Some --name options were given, but not enough to name every var
+            if len(args) > len(name_and_text_list):
+                # Deliberately vague so we never echo any plaintext in the output.
+                display.display('The number of --name options does not match the number of args.',
+                                stderr=True)
+ display.display('The last named variable will be "%s". The rest will not have'
+ ' names.' % context.CLIARGS['encrypt_string_names'][-1],
+ stderr=True)
+
+ # Add the rest of the args without specifying a name
+ for extra_arg in args[len(name_and_text_list):]:
+ name_and_text_list.append((None, extra_arg))
+
+ # if no --names are provided, just use the args without a name.
+ else:
+ name_and_text_list = [(None, x) for x in args]
+
+        # Convert the plaintext values to bytestrings and collect them
+ for name_and_text in name_and_text_list:
+ name, plaintext = name_and_text
+
+ if plaintext == '':
+ raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
+
+ b_plaintext = to_bytes(plaintext)
+ b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
+
+ # TODO: specify vault_id per string?
+ # Format the encrypted strings and any corresponding stderr output
+ outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
+
+ b_outs = []
+ for output in outputs:
+ err = output.get('err', None)
+ out = output.get('out', '')
+ if err:
+ sys.stderr.write(err)
+ b_outs.append(to_bytes(out))
+
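+        # write_data() treats '-' as stdout.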
+ self.editor.write_data(b'\n'.join(b_outs), context.CLIARGS['output_file'] or '-')
+
+ if sys.stdout.isatty():
+ display.display("Encryption successful", stderr=True)
+
+ # TODO: offer block or string ala eyaml
+
+ def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
+        # If we are only showing one item in the output, we don't need to include
+        # commented delimiters in the text
+ show_delimiter = False
+ if len(b_plaintext_list) > 1:
+ show_delimiter = True
+
+ # list of dicts {'out': '', 'err': ''}
+ output = []
+
+ # Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
+ # For more than one input, show some differentiating info in the stderr output so we can tell them
+ # apart. If we have a var name, we include that in the yaml
+ for index, b_plaintext_info in enumerate(b_plaintext_list):
+ # (the text itself, which input it came from, its name)
+ b_plaintext, src, name = b_plaintext_info
+
+ b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret, vault_id=vault_id)
+
+ # block formatting
+ yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
+
+ err_msg = None
+ if show_delimiter:
+ human_index = index + 1
+ if name:
+ err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
+ else:
+                    err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
+ output.append({'out': yaml_text, 'err': err_msg})
+
+ return output
+
+ def execute_decrypt(self):
+ ''' decrypt the supplied file using the provided vault secret '''
+
+ if not context.CLIARGS['args'] and sys.stdin.isatty():
+ display.display("Reading ciphertext input from stdin", stderr=True)
+
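+        # As with encrypt, '-' means read the ciphertext from stdin and write to stdout.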
+ for f in context.CLIARGS['args'] or ['-']:
+ self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
+
+ if sys.stdout.isatty():
+ display.display("Decryption successful", stderr=True)
+
+ def execute_create(self):
+ ''' create and open a file in an editor that will be encrypted with the provided vault secret when closed'''
+
+ if len(context.CLIARGS['args']) != 1:
+ raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
+
+ self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
+ vault_id=self.encrypt_vault_id)
+
+ def execute_edit(self):
+        ''' open and decrypt an existing vaulted file in an editor; it will be encrypted again when closed '''
+ for f in context.CLIARGS['args']:
+ self.editor.edit_file(f)
+
+ def execute_view(self):
+ ''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret '''
+
+ for f in context.CLIARGS['args']:
+ # Note: vault should return byte strings because it could encrypt
+ # and decrypt binary files. We are responsible for changing it to
+ # unicode here because we are displaying it and therefore can make
+ # the decision that the display doesn't have to be precisely what
+ # the input was (leave that to decrypt instead)
+ plaintext = self.editor.plaintext(f)
+ self.pager(to_text(plaintext))
+
+ def execute_rekey(self):
+        ''' re-encrypt a vaulted file with a new secret; the previous secret is required '''
+ for f in context.CLIARGS['args']:
+ # FIXME: plumb in vault_id, use the default new_vault_secret for now
+ self.editor.rekey_file(f, self.new_encrypt_secret,
+ self.new_encrypt_vault_id)
+
+ display.display("Rekey successful", stderr=True)
+
+
+def main(args=None):
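+    # cli_executor() (from the CLI base class) wraps argument parsing, run(),
+    # error reporting and the process exit code.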
+ VaultCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
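+
+# Illustrative invocations (a sketch, not part of the upstream script; the file
+# name 'secrets.yml' and the vault ids 'dev'/'prod' below are assumptions):
+#
+#   ansible-vault encrypt --vault-id dev@prompt secrets.yml
+#   ansible-vault encrypt_string --vault-id dev@prompt 's3cr3t' --name 'db_password'
+#   ansible-vault rekey --vault-id dev@prompt --new-vault-id prod@prompt secrets.yml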